hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c32bcec0553e370ac4baffa77df6bcc9051b48b | 397 | py | Python | tests/test_binomial.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | 5 | 2020-07-30T08:17:53.000Z | 2021-12-09T09:00:03.000Z | tests/test_binomial.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | 11 | 2019-04-14T21:28:25.000Z | 2021-12-07T10:35:49.000Z | tests/test_binomial.py | fau-klue/pandas-association-measures | 80625936d8c25afc81810a5c82b2b7fff66a375c | [
"MIT"
] | null | null | null | import pytest
from association_measures import binomial as bi
@pytest.mark.binomial
def test_choose():
    """Verify bi.choose against hand-computed binomial coefficients.

    Covers ordinary cases, the ``n == k`` and ``0 choose 0`` identities,
    negative arguments (defined to return 0) and one large-value case
    that exercises the float range.
    """
    cases = [
        (9, 3, 84),
        (5, 2, 10),
        (2, 2, 1),
        (0, 0, 1),
        (-3, 5, 0),
        (-2, -2, 0),
        (10, -2, 0),
        (1000, 500, 2.7028824094543644e+299),
    ]
    for n, k, expected in cases:
        assert bi.choose(n, k) == expected
| 23.352941 | 58 | 0.629723 | import pytest
from association_measures import binomial as bi
@pytest.mark.binomial
def test_choose():
    """Check bi.choose for known values, edge cases and negative inputs."""
    expected = {
        (9, 3): 84,
        (5, 2): 10,
        (2, 2): 1,
        (0, 0): 1,
        (-3, 5): 0,
        (-2, -2): 0,
        (10, -2): 0,
        (1000, 500): 2.7028824094543644e+299,
    }
    for (n, k), value in expected.items():
        assert bi.choose(n, k) == value
| true | true |
1c32be0998b0394d9fa7382a2b7e68a4e0aa3b72 | 2,962 | py | Python | src/ktop/widget_dashboard.py | deathbeds/ktop | b9ff55af75980526337ea1903bda83ccf1bf2f1e | [
"BSD-3-Clause"
] | null | null | null | src/ktop/widget_dashboard.py | deathbeds/ktop | b9ff55af75980526337ea1903bda83ccf1bf2f1e | [
"BSD-3-Clause"
] | 1 | 2018-01-19T17:11:40.000Z | 2018-01-19T17:11:40.000Z | src/ktop/widget_dashboard.py | deathbeds/ktop | b9ff55af75980526337ea1903bda83ccf1bf2f1e | [
"BSD-3-Clause"
] | null | null | null | import traitlets as T
import ipywidgets as W
class DefaultKernelView(W.VBox):
    """ Show a kernel

    A vertical box with a header row (notebook name, save / progress /
    rerun / shutdown controls) above the kernel's own widget area.
    """
    def __init__(self, *args, **kwargs):
        # the kernel model to display; required keyword argument, popped so
        # the remaining kwargs can be forwarded to the VBox constructor
        kernel = kwargs.pop("kernel")
        super(DefaultKernelView, self).__init__(*args, **kwargs)

        progress = W.FloatProgress(min=0.0, max=1.0)
        widgets = W.VBox()
        shutdown = W.Button(icon="trash")
        rerun = W.Button(icon="play")
        save = W.Button(icon="floppy-o")
        file_name = W.Text(description="📓", placeholder="Notebook Name")

        # style
        for btn in [shutdown, save, rerun]:
            btn.layout.max_width = btn.layout.min_width = "3em"
        widgets.layout.flex = "2"

        # events: each button delegates to the corresponding kernel method
        shutdown.on_click(lambda *x: kernel.shutdown())
        rerun.on_click(lambda *x: kernel.rerun())
        save.on_click(lambda *x: kernel.save())

        # links: mirror kernel traits into the view (one-way dlinks); the
        # widgets list is filtered to items that expose a `layout` trait
        T.dlink((kernel, "execution_state"), (progress, "description"))
        T.dlink((kernel, "progress"), (progress, "value"))
        T.dlink((kernel, "file_name"), (file_name, "value"))
        T.dlink((kernel, "widgets"), (widgets, "children"),
                lambda widgets: [
                    w for w in widgets
                    if "layout" in w.trait_names()])

        self.children = [
            W.HBox([
                file_name,
                save,
                progress,
                rerun,
                shutdown,
            ]),
            widgets,
        ]
class DefaultNotebookView(W.HBox):
    """ Show a Notebook and all of its kernels

    Left column: notebook name with save/load buttons above an HTML
    rendering of the notebook's cell sources.  Right column: one view
    per kernel attached to the notebook.
    """
    def __init__(self, *args, **kwargs):
        # the notebook model to display; required keyword argument
        notebook = kwargs.pop("notebook")
        super(DefaultNotebookView, self).__init__(*args, **kwargs)

        name = W.Text()
        kernels = W.VBox()
        cells = W.VBox()
        save = W.Button(icon="floppy-o")
        load = W.Button(icon="refresh")

        # narrow buttons; let the name field absorb the spare width
        for btn in [save, load]:
            btn.layout.max_width = btn.layout.min_width = "3em"
        name.layout.flex = "1"

        # events
        save.on_click(lambda *x: notebook.save())
        load.on_click(lambda *x: notebook.load())

        def _make_children(ipynb):
            # render each cell's source lines as one <code> block
            return [
                W.HTML("<code>\n{}\n</code>".format(
                    "\n".join(cell.get("source", []))))
                for cell in ipynb["cells"]
            ]

        # two-way link for the name; one-way dlinks for derived views
        T.link((notebook, "file_name"), (name, "value"))
        T.dlink((notebook, "kernels"), (kernels, "children"),
                lambda children: [c.view() for c in children])
        T.dlink((notebook, "ipynb"), (cells, "children"), _make_children)

        left = W.VBox([
            W.HBox([
                name,
                save,
                load,
            ]),
            cells,
        ])
        right = W.VBox([
            kernels,
        ])
        left.layout.flex = "1"
        right.layout.flex = "1.6"
        self.children = [
            left,
            right,
        ]
| 27.425926 | 73 | 0.497974 | import traitlets as T
import ipywidgets as W
class DefaultKernelView(W.VBox):
    """Show a kernel: header controls (name, save, progress, rerun,
    shutdown) stacked above the kernel's widget area."""
    def __init__(self, *args, **kwargs):
        # required keyword argument: the kernel model to display
        kernel = kwargs.pop("kernel")
        super(DefaultKernelView, self).__init__(*args, **kwargs)
        progress = W.FloatProgress(min=0.0, max=1.0)
        widgets = W.VBox()
        shutdown = W.Button(icon="trash")
        rerun = W.Button(icon="play")
        save = W.Button(icon="floppy-o")
        file_name = W.Text(description="📓", placeholder="Notebook Name")
        # fixed-width buttons; widget area takes the remaining space
        for btn in [shutdown, save, rerun]:
            btn.layout.max_width = btn.layout.min_width = "3em"
        widgets.layout.flex = "2"
        # buttons delegate to the corresponding kernel methods
        shutdown.on_click(lambda *x: kernel.shutdown())
        rerun.on_click(lambda *x: kernel.rerun())
        save.on_click(lambda *x: kernel.save())
        # one-way links mirroring kernel traits into the view widgets
        T.dlink((kernel, "execution_state"), (progress, "description"))
        T.dlink((kernel, "progress"), (progress, "value"))
        T.dlink((kernel, "file_name"), (file_name, "value"))
        T.dlink((kernel, "widgets"), (widgets, "children"),
                lambda widgets: [
                    w for w in widgets
                    if "layout" in w.trait_names()])
        self.children = [
            W.HBox([
                file_name,
                save,
                progress,
                rerun,
                shutdown,
            ]),
            widgets,
        ]
class DefaultNotebookView(W.HBox):
    """Show a Notebook and all of its kernels: cell sources on the left,
    per-kernel views on the right."""
    def __init__(self, *args, **kwargs):
        # required keyword argument: the notebook model to display
        notebook = kwargs.pop("notebook")
        super(DefaultNotebookView, self).__init__(*args, **kwargs)
        name = W.Text()
        kernels = W.VBox()
        cells = W.VBox()
        save = W.Button(icon="floppy-o")
        load = W.Button(icon="refresh")
        # narrow buttons; the name field stretches
        for btn in [save, load]:
            btn.layout.max_width = btn.layout.min_width = "3em"
        name.layout.flex = "1"
        save.on_click(lambda *x: notebook.save())
        load.on_click(lambda *x: notebook.load())
        def _make_children(ipynb):
            # render each cell's source lines as one <code> block
            return [
                W.HTML("<code>\n{}\n</code>".format(
                    "\n".join(cell.get("source", []))))
                for cell in ipynb["cells"]
            ]
        # two-way link for the name; one-way dlinks for derived content
        T.link((notebook, "file_name"), (name, "value"))
        T.dlink((notebook, "kernels"), (kernels, "children"),
                lambda children: [c.view() for c in children])
        T.dlink((notebook, "ipynb"), (cells, "children"), _make_children)
        left = W.VBox([
            W.HBox([
                name,
                save,
                load,
            ]),
            cells,
        ])
        right = W.VBox([
            kernels,
        ])
        left.layout.flex = "1"
        right.layout.flex = "1.6"
        self.children = [
            left,
            right,
        ]
| true | true |
1c32bef877f7c9cf27ab6520cf26b17fc8852996 | 5,118 | py | Python | zodiac_circle_v2.2.py | DCRHallum/Zodiac-Circle | 9a121f982bd2a813e4fa8aee725b7949703f2564 | [
"CC0-1.0"
] | null | null | null | zodiac_circle_v2.2.py | DCRHallum/Zodiac-Circle | 9a121f982bd2a813e4fa8aee725b7949703f2564 | [
"CC0-1.0"
] | null | null | null | zodiac_circle_v2.2.py | DCRHallum/Zodiac-Circle | 9a121f982bd2a813e4fa8aee725b7949703f2564 | [
"CC0-1.0"
] | null | null | null | zodiac_signs = ['aries','taurus','gemini','cancer','leo','virgo','libra','scorpio','sagittarius','capricorn','aquarius','pisces']
# Display (capitalized) names, index-aligned with zodiac_signs above.
fin_zodiac_signs = ['Aries','Taurus','Gemini','Cancer','Leo','Virgo','Libra','Scorpio','Sagittarius','Capricorn','Aquarius','Pisces']
# Ruling planet ("house") per sign; NOTE(review): 13 entries — the trailing
# 'Mars' looks like it absorbs the index-12 case produced by the degree
# wrap-around arithmetic below (TODO confirm).
houses = ['Mars','Venus','Mercury','The Moon','The Sun','Mercury','Venus','Mars','Jupiter','Saturn','Saturn','Jupiter','Mars']
# Absolute degree on the 360° circle -> exalted planet.
exaltations = {33:'The Moon',19:'The Sun',105:'Jupiter',165:'Mercury',201:'Saturn',200:'Saturn according to Pliny, Firmicus Maternus and Varāhamihira',298:'Mars',357:'Venus'}
# Absolute degree -> planet in dejection (fall).
dejections = {199:'The Sun',213:'The Moon',21:'Saturn',20:'Saturn according to Pliny, Firmicus Maternus and Varāhamihira',108:'Mars',285:'Jupiter',147:'Venus',345:'Mercury'}
# Sign -> planet exalted / dejected in that whole sign.
house_exaltations = {'aries':'The Sun','taurus':'The Moon','libra':'Saturn','cancer':'Jupiter','capricorn':'Mars','pisces':'Venus','virgo':'Mercury'}
house_dejections = {'libra':'The Sun','scorpio':'The Moon','aries':'Saturn','capricorn':'Jupiter','cancer':'Mars','virgo':'Venus','pisces':'Mercury'}
# Main-loop flag; cleared when the user declines to repeat.
rep = True
# Main interactive loop: one reading ("cast") per iteration.
while rep:
    #START POINT
    print('\nStart Point\n')
    init_sign = 'a'
    while init_sign.lower() not in zodiac_signs:
        init_sign = input('Sign: ')
    init_sign = init_sign.lower()
    init_house = houses[zodiac_signs.index(init_sign)]
    print('(House of '+init_house+')')
    #house exaltations/dejections
    try:
        house_ex_dej = house_exaltations[init_sign]
        house_ex_dej = ' exaltation of '+house_ex_dej
        print('(House of the'+house_ex_dej+')')
    except:
        pass
    try:
        house_ex_dej = house_dejections[init_sign]
        house_ex_dej = ' dejection of '+house_ex_dej
        print('(House of the'+house_ex_dej+')')
    except:
        pass
    init_degrees = '1'
    # Degree prompt: 1-30 selects a degree-level cast; an empty answer
    # selects a sign-level cast; anything else re-prompts.
    while True:
        init_degrees_str = input('Degree: ')
        if init_degrees_str.isdigit():
            init_degrees = int(init_degrees_str)
            if 0 < init_degrees < 31:
                process = 1 #casts by degrees
                init_total_degrees = (((zodiac_signs.index(init_sign))*30)+init_degrees) #number of 30s + input degrees
                #exaltation or dejection
                try:
                    init_ex_dej = exaltations[init_total_degrees]
                    print('(Exaltation of '+init_ex_dej+')')
                except:
                    try:
                        init_ex_dej = dejections[init_total_degrees]
                        print('(Dejection of '+init_ex_dej+')')
                    except:
                        init_ex_dej = ''
                break
        elif init_degrees_str == '':
            process = 2 #casts by sign
            break
    #Cast
    print('\nCast\n')
    while True:
        cast_str = input('Number: ')
        if cast_str.isdigit():
            cast = int(cast_str)
            break
    cast_direction = 'a'
    while cast_direction.upper() != 'F' and cast_direction.upper() != 'B':
        cast_direction = input('Forwards or backwards (f/b)? ')
    if cast_direction.upper() == 'B':
        cast = 0 - (cast-2)
    #ACTUAL PROCESS
    if process == 1: #cast by degree
        init_total_degrees = (((zodiac_signs.index(init_sign))*30)+init_degrees) #number of 30s + input degrees
        fin_total_degrees = (init_total_degrees + (cast-1)) #when casting you add by 1 less
        fin_sign = fin_zodiac_signs[(((((fin_total_degrees-1) % 360)+1)-1) // 30)]
        fin_house = houses[((((fin_total_degrees-1) % 360)+1) // 30)]
        fin_degrees = ((fin_total_degrees-1) % 30)+1
        #exaltation or dejection
        try:
            ex_dej = exaltations[fin_total_degrees%360]
            ex_dej = '(Exaltation of '+ex_dej+')'
        except:
            try:
                ex_dej = dejections[fin_total_degrees%360]
                ex_dej = '(Dejection of '+ex_dej+')'
            except:
                ex_dej = ''
        #house exaltations or dejections
        try:
            fin_house_ex = house_exaltations[fin_sign.lower()]
            fin_house_ex = '(House of the exaltation of '+fin_house_ex+') '
        except:
            fin_house_ex = ''
        try:
            fin_house_dej = house_dejections[fin_sign.lower()]
            fin_house_dej = '(House of the dejection of '+fin_house_dej+') '
        except:
            fin_house_dej = ''
        print('\nEnd Point\n')
        print('Sign: '+fin_sign+' (House of '+fin_house+')')
        print(fin_house_ex+fin_house_dej)
        print('Degrees: '+str(fin_degrees)+'° '+ex_dej)
        print('\n')
    else: #cast by sign
        init_total_degrees = (((zodiac_signs.index(init_sign))*30)) #number of 30s + input degrees
        fin_total_degrees = (init_total_degrees + ((cast-1)*30)) #when casting you add by 1 less
        fin_sign = fin_zodiac_signs[((((((fin_total_degrees-1) % 360)+1))) // 30)]
        fin_house = houses[((((((fin_total_degrees-1) % 360)+1))) // 30)]
        #exaltation or dejection
        #house exaltations or dejections
        try:
            fin_house_ex = house_exaltations[fin_sign.lower()]
            fin_house_ex = '(House of the exaltation of '+fin_house_ex+') '
        except:
            fin_house_ex = ''
        try:
            fin_house_dej = house_dejections[fin_sign.lower()]
            fin_house_dej = '(House of the dejection of '+fin_house_dej+') '
        except:
            fin_house_dej = ''
        print('\nEnd Point\n')
        print('Sign: '+fin_sign+' (House of '+fin_house+')')
        print(fin_house_ex+fin_house_dej)
        print('\n')
    rep_str = 'a'
    while rep_str.lower() != 'y' and rep_str.lower() != 'n':
        rep_str = input('Repeat program (y/n)? ')
    # BUG FIX: the prompt loop above accepts 'N' as well as 'n', but the
    # original comparison (rep_str == 'n') was case-sensitive, so answering
    # 'N' restarted the program instead of quitting.
    if rep_str.lower() == 'n':
        rep = False
    else:
        print('\n\n')
| 33.45098 | 175 | 0.655334 | zodiac_signs = ['aries','taurus','gemini','cancer','leo','virgo','libra','scorpio','sagittarius','capricorn','aquarius','pisces']
# Display (capitalized) names, index-aligned with zodiac_signs above.
fin_zodiac_signs = ['Aries','Taurus','Gemini','Cancer','Leo','Virgo','Libra','Scorpio','Sagittarius','Capricorn','Aquarius','Pisces']
# Ruling planet ("house") per sign; NOTE(review): 13 entries — the trailing
# 'Mars' looks like it absorbs the index-12 case from the wrap-around
# arithmetic below (TODO confirm).
houses = ['Mars','Venus','Mercury','The Moon','The Sun','Mercury','Venus','Mars','Jupiter','Saturn','Saturn','Jupiter','Mars']
# Absolute degree on the 360° circle -> exalted planet.
exaltations = {33:'The Moon',19:'The Sun',105:'Jupiter',165:'Mercury',201:'Saturn',200:'Saturn according to Pliny, Firmicus Maternus and Varāhamihira',298:'Mars',357:'Venus'}
# Absolute degree -> planet in dejection (fall).
dejections = {199:'The Sun',213:'The Moon',21:'Saturn',20:'Saturn according to Pliny, Firmicus Maternus and Varāhamihira',108:'Mars',285:'Jupiter',147:'Venus',345:'Mercury'}
# Sign -> planet exalted / dejected in that whole sign.
house_exaltations = {'aries':'The Sun','taurus':'The Moon','libra':'Saturn','cancer':'Jupiter','capricorn':'Mars','pisces':'Venus','virgo':'Mercury'}
house_dejections = {'libra':'The Sun','scorpio':'The Moon','aries':'Saturn','capricorn':'Jupiter','cancer':'Mars','virgo':'Venus','pisces':'Mercury'}
# Main-loop flag; cleared when the user declines to repeat.
rep = True
# Main interactive loop: one reading ("cast") per iteration.
while rep:
    print('\nStart Point\n')
    init_sign = 'a'
    while init_sign.lower() not in zodiac_signs:
        init_sign = input('Sign: ')
    init_sign = init_sign.lower()
    init_house = houses[zodiac_signs.index(init_sign)]
    print('(House of '+init_house+')')
    # house-level exaltation / dejection of the start sign (if any)
    try:
        house_ex_dej = house_exaltations[init_sign]
        house_ex_dej = ' exaltation of '+house_ex_dej
        print('(House of the'+house_ex_dej+')')
    except:
        pass
    try:
        house_ex_dej = house_dejections[init_sign]
        house_ex_dej = ' dejection of '+house_ex_dej
        print('(House of the'+house_ex_dej+')')
    except:
        pass
    init_degrees = '1'
    # 1-30 selects a degree-level cast; empty input selects a sign-level cast
    while True:
        init_degrees_str = input('Degree: ')
        if init_degrees_str.isdigit():
            init_degrees = int(init_degrees_str)
            if 0 < init_degrees < 31:
                process = 1
                init_total_degrees = (((zodiac_signs.index(init_sign))*30)+init_degrees)
                try:
                    init_ex_dej = exaltations[init_total_degrees]
                    print('(Exaltation of '+init_ex_dej+')')
                except:
                    try:
                        init_ex_dej = dejections[init_total_degrees]
                        print('(Dejection of '+init_ex_dej+')')
                    except:
                        init_ex_dej = ''
                break
        elif init_degrees_str == '':
            process = 2
            break
    print('\nCast\n')
    while True:
        cast_str = input('Number: ')
        if cast_str.isdigit():
            cast = int(cast_str)
            break
    cast_direction = 'a'
    while cast_direction.upper() != 'F' and cast_direction.upper() != 'B':
        cast_direction = input('Forwards or backwards (f/b)? ')
    if cast_direction.upper() == 'B':
        cast = 0 - (cast-2)
    if process == 1:
        # cast by degree: counting is inclusive, hence the (cast-1)
        init_total_degrees = (((zodiac_signs.index(init_sign))*30)+init_degrees)
        fin_total_degrees = (init_total_degrees + (cast-1))
        fin_sign = fin_zodiac_signs[(((((fin_total_degrees-1) % 360)+1)-1) // 30)]
        fin_house = houses[((((fin_total_degrees-1) % 360)+1) // 30)]
        fin_degrees = ((fin_total_degrees-1) % 30)+1
        try:
            ex_dej = exaltations[fin_total_degrees%360]
            ex_dej = '(Exaltation of '+ex_dej+')'
        except:
            try:
                ex_dej = dejections[fin_total_degrees%360]
                ex_dej = '(Dejection of '+ex_dej+')'
            except:
                ex_dej = ''
        try:
            fin_house_ex = house_exaltations[fin_sign.lower()]
            fin_house_ex = '(House of the exaltation of '+fin_house_ex+') '
        except:
            fin_house_ex = ''
        try:
            fin_house_dej = house_dejections[fin_sign.lower()]
            fin_house_dej = '(House of the dejection of '+fin_house_dej+') '
        except:
            fin_house_dej = ''
        print('\nEnd Point\n')
        print('Sign: '+fin_sign+' (House of '+fin_house+')')
        print(fin_house_ex+fin_house_dej)
        print('Degrees: '+str(fin_degrees)+'° '+ex_dej)
        print('\n')
    else:
        # cast by sign: whole 30° steps, inclusive counting again
        init_total_degrees = (((zodiac_signs.index(init_sign))*30))
        fin_total_degrees = (init_total_degrees + ((cast-1)*30))
        fin_sign = fin_zodiac_signs[((((((fin_total_degrees-1) % 360)+1))) // 30)]
        fin_house = houses[((((((fin_total_degrees-1) % 360)+1))) // 30)]
        try:
            fin_house_ex = house_exaltations[fin_sign.lower()]
            fin_house_ex = '(House of the exaltation of '+fin_house_ex+') '
        except:
            fin_house_ex = ''
        try:
            fin_house_dej = house_dejections[fin_sign.lower()]
            fin_house_dej = '(House of the dejection of '+fin_house_dej+') '
        except:
            fin_house_dej = ''
        print('\nEnd Point\n')
        print('Sign: '+fin_sign+' (House of '+fin_house+')')
        print(fin_house_ex+fin_house_dej)
        print('\n')
    rep_str = 'a'
    while rep_str.lower() != 'y' and rep_str.lower() != 'n':
        rep_str = input('Repeat program (y/n)? ')
    # BUG FIX: the prompt loop accepts 'N' too; the original case-sensitive
    # comparison (rep_str == 'n') made 'N' restart instead of quitting.
    if rep_str.lower() == 'n':
        rep = False
    else:
        print('\n\n')
| true | true |
1c32c03775f8c3d36f64ba55ad12508fc9bcf5e2 | 4,699 | py | Python | createaddr.py | xcbtrader/multi-btc-address-create | 7904b30dfe9457cd669bd113e28fb74ec1be3350 | [
"MIT"
] | null | null | null | createaddr.py | xcbtrader/multi-btc-address-create | 7904b30dfe9457cd669bd113e28fb74ec1be3350 | [
"MIT"
] | null | null | null | createaddr.py | xcbtrader/multi-btc-address-create | 7904b30dfe9457cd669bd113e28fb74ec1be3350 | [
"MIT"
] | null | null | null | __author__ = 'deunido'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bitcoin import *
from tkinter import *
from tkinter import ttk, font
import sys
import requests
import json
class Aplicacion():
    """Tkinter window for deriving Bitcoin addresses.

    Creates an address either from a random private key or from the
    SHA-256 of a user-supplied text (brain-wallet style), can query the
    blockr.io API for the balance, and can save the key material to a
    ``<address>.dat`` file.  Constructing the object enters Tk's
    mainloop, so ``__init__`` blocks until the window is closed.
    """
    def __init__(self):
        self.raiz = Tk()
        self.raiz.geometry("700x305")
        self.raiz.resizable(width=False, height=False)
        self.raiz.title("Multi Create Addr Win 1.0")
        self.fuente = font.Font(weight='bold', size=11)
        # static labels
        self.etqText = ttk.Label(self.raiz, text= 'TEXTO PARA CREAR ADDR', font=self.fuente)
        self.etqAddr = ttk.Label(self.raiz, text= 'DIRECCION BTC', font=self.fuente)
        self.etqPriv = ttk.Label(self.raiz, text= 'PRIVADA',font=self.fuente)
        self.etqWif = ttk.Label(self.raiz, text= 'WIF PARA IMPORTAR',font=self.fuente)
        self.etqSaldo = ttk.Label(self.raiz, text= 'SALDO-TOT.RECIB.',font=self.fuente)
        self.etqLin = ttk.Label(self.raiz, text= '--------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
        # Tk variables backing the entry/label widgets
        self.msgText = StringVar()
        self.msgText.set('')
        self.msgAddr = StringVar()
        self.msgAddr.set('')
        self.msgPriv = StringVar()
        self.msgPriv.set('')
        self.msgWif = StringVar()
        self.msgWif.set('')
        self.msgSaldo = DoubleVar()
        self.msgSaldo.set(0.0)
        self.msgTotRec = DoubleVar()
        self.msgTotRec.set(0.0)
        self.tText = ttk.Entry(self.raiz,textvariable=self.msgText,justify= 'center',width=67,font=self.fuente)
        self.tAddr = ttk.Entry(self.raiz,textvariable=self.msgAddr,justify= 'center',width=50,font=self.fuente)
        self.tPriv = ttk.Entry(self.raiz,textvariable=self.msgPriv,justify= 'center',width=67,font=self.fuente)
        self.tWif = ttk.Entry(self.raiz,textvariable=self.msgWif,justify= 'center',width=67,font=self.fuente)
        self.tSaldo = ttk.Label(self.raiz,textvariable=str(self.msgSaldo),justify= 'center',font=self.fuente)
        self.tTotRec = ttk.Label(self.raiz,textvariable=str(self.msgTotRec),justify= 'center',font=self.fuente)
        # action buttons wired to the methods below
        self.BotAddrText = ttk.Button(self.raiz, text="ADDR <-> TEXT", padding=(5,5), command=self.crear_addr_text)
        self.BotAddrAleat = ttk.Button(self.raiz, text="ADDR <-> ALEAT", padding=(5,5), command=self.crear_addr_aleat)
        self.BotAddrSaldo = ttk.Button(self.raiz, text="SALDO", padding=(5,5), command=self.b1)
        self.BotAddrGuardar = ttk.Button(self.raiz, text="GUARDAR", padding=(5,5), command=self.guardar)
        self.BotInicializar = ttk.Button(self.raiz, text="INICIALIZAR", padding=(5,5), command=self.inicializar)
        self.BotSalir = ttk.Button(self.raiz, text="SALIR", padding=(5,5), command=quit)
        # fixed-pixel layout
        self.etqText.place(x=220, y=10)
        self.tText.place(x=10, y=30)
        self.etqAddr.place(x=180, y=65)
        self.tAddr.place(x=10, y=85)
        self.etqSaldo.place(x=530, y=65)
        self.tSaldo.place(x=540, y=85)
        self.tTotRec.place(x=540, y=105)
        self.etqPriv.place(x=300, y=125)
        self.tPriv.place(x=10, y=145)
        self.etqWif.place(x=260, y=185)
        self.tWif.place(x=10, y=205)
        self.etqLin.place(x=10, y=240)
        self.BotAddrText.place(x=20, y=260)
        self.BotAddrAleat.place(x=150, y=260)
        self.BotAddrSaldo.place(x=285, y=260)
        self.BotAddrGuardar.place(x=388, y=260)
        self.BotInicializar.place(x=492, y=260)
        self.BotSalir.place(x=595, y=260)
        self.raiz.mainloop()
    def crear_addr_aleat(self):
        """Generate a random private key and show its address, key and WIF."""
        priv = random_key()
        pub = privtopub(priv)
        addr = pubtoaddr(pub)
        wif = encode_privkey(priv, 'wif')
        self.msgAddr.set(addr)
        self.msgPriv.set(priv)
        self.msgWif.set(wif)
    def crear_addr_text(self):
        """Derive a key pair from SHA-256 of the text field (brain wallet).

        SECURITY NOTE: keys derived from short human-chosen text are
        trivially brute-forced; use only for experimentation.
        """
        priv = sha256(self.msgText.get())
        pub = privtopub(priv)
        addr = pubtoaddr(pub)
        wif = encode_privkey(priv, 'wif')
        self.msgAddr.set(addr)
        self.msgPriv.set(priv)
        self.msgWif.set(wif)
    def guardar(self):
        """Save WIF/private key/address/balance to '<address>.dat'.

        Does nothing when no address has been generated yet.
        """
        if self.msgAddr.get() != '':
            # BUG FIX: use a context manager so the file is closed even if
            # a write raises (the original open()/close() pair leaked the
            # handle on error).
            with open(self.msgAddr.get() + '.dat', 'w') as f:
                f.write('WIF: ' + self.msgWif.get() + '\n')
                f.write('PRIV: ' + self.msgPriv.get() + '\n')
                f.write('ADDR: ' + self.msgAddr.get() + '\n')
                f.write('SALDO: ' + str(self.msgSaldo.get()))
    def b1(self):
        """Fetch balance and total-received from blockr.io for the address.

        On any network/parsing failure both fields are set to -1 as an
        error marker; Ctrl-C exits the program.
        """
        try:
            request = 'http://btc.blockr.io/api/v1/address/info/' + self.msgAddr.get()
            response = requests.get(request, timeout=10)
            content = response.json()
            content1 = content['data'] ['balance']
            content2 = content['data'] ['totalreceived']
            self.msgSaldo.set(content1)
            self.msgTotRec.set(content2)
        except KeyboardInterrupt:
            exit()
        except Exception:
            self.msgSaldo.set(-1)
            self.msgTotRec.set(-1)
    def inicializar(self):
        """Reset all fields to their initial empty/zero state."""
        self.msgText.set('')
        self.msgAddr.set('')
        self.msgPriv.set('')
        self.msgWif.set('')
        self.msgSaldo.set(0.0)
def main():
    """Start the GUI; returns 0 once the Tk main loop ends."""
    app = Aplicacion()  # blocks inside Tk's mainloop until the window closes
    return 0
# Run the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 34.807407 | 216 | 0.662269 | __author__ = 'deunido'
from bitcoin import *
from tkinter import *
from tkinter import ttk, font
import sys
import requests
import json
class Aplicacion():
    """Tkinter window for deriving Bitcoin addresses.

    Creates an address either from a random private key or from the
    SHA-256 of a user-supplied text (brain-wallet style), can query the
    blockr.io API for the balance, and can save the key material to a
    ``<address>.dat`` file.  Constructing the object enters Tk's
    mainloop, so ``__init__`` blocks until the window is closed.
    """
    def __init__(self):
        self.raiz = Tk()
        self.raiz.geometry("700x305")
        self.raiz.resizable(width=False, height=False)
        self.raiz.title("Multi Create Addr Win 1.0")
        self.fuente = font.Font(weight='bold', size=11)
        # static labels
        self.etqText = ttk.Label(self.raiz, text= 'TEXTO PARA CREAR ADDR', font=self.fuente)
        self.etqAddr = ttk.Label(self.raiz, text= 'DIRECCION BTC', font=self.fuente)
        self.etqPriv = ttk.Label(self.raiz, text= 'PRIVADA',font=self.fuente)
        self.etqWif = ttk.Label(self.raiz, text= 'WIF PARA IMPORTAR',font=self.fuente)
        self.etqSaldo = ttk.Label(self.raiz, text= 'SALDO-TOT.RECIB.',font=self.fuente)
        self.etqLin = ttk.Label(self.raiz, text= '--------------------------------------------------------------------------------------------------------------------------------------------------------------------------')
        # Tk variables backing the entry/label widgets
        self.msgText = StringVar()
        self.msgText.set('')
        self.msgAddr = StringVar()
        self.msgAddr.set('')
        self.msgPriv = StringVar()
        self.msgPriv.set('')
        self.msgWif = StringVar()
        self.msgWif.set('')
        self.msgSaldo = DoubleVar()
        self.msgSaldo.set(0.0)
        self.msgTotRec = DoubleVar()
        self.msgTotRec.set(0.0)
        self.tText = ttk.Entry(self.raiz,textvariable=self.msgText,justify= 'center',width=67,font=self.fuente)
        self.tAddr = ttk.Entry(self.raiz,textvariable=self.msgAddr,justify= 'center',width=50,font=self.fuente)
        self.tPriv = ttk.Entry(self.raiz,textvariable=self.msgPriv,justify= 'center',width=67,font=self.fuente)
        self.tWif = ttk.Entry(self.raiz,textvariable=self.msgWif,justify= 'center',width=67,font=self.fuente)
        self.tSaldo = ttk.Label(self.raiz,textvariable=str(self.msgSaldo),justify= 'center',font=self.fuente)
        self.tTotRec = ttk.Label(self.raiz,textvariable=str(self.msgTotRec),justify= 'center',font=self.fuente)
        # action buttons wired to the methods below
        self.BotAddrText = ttk.Button(self.raiz, text="ADDR <-> TEXT", padding=(5,5), command=self.crear_addr_text)
        self.BotAddrAleat = ttk.Button(self.raiz, text="ADDR <-> ALEAT", padding=(5,5), command=self.crear_addr_aleat)
        self.BotAddrSaldo = ttk.Button(self.raiz, text="SALDO", padding=(5,5), command=self.b1)
        self.BotAddrGuardar = ttk.Button(self.raiz, text="GUARDAR", padding=(5,5), command=self.guardar)
        self.BotInicializar = ttk.Button(self.raiz, text="INICIALIZAR", padding=(5,5), command=self.inicializar)
        self.BotSalir = ttk.Button(self.raiz, text="SALIR", padding=(5,5), command=quit)
        # fixed-pixel layout
        self.etqText.place(x=220, y=10)
        self.tText.place(x=10, y=30)
        self.etqAddr.place(x=180, y=65)
        self.tAddr.place(x=10, y=85)
        self.etqSaldo.place(x=530, y=65)
        self.tSaldo.place(x=540, y=85)
        self.tTotRec.place(x=540, y=105)
        self.etqPriv.place(x=300, y=125)
        self.tPriv.place(x=10, y=145)
        self.etqWif.place(x=260, y=185)
        self.tWif.place(x=10, y=205)
        self.etqLin.place(x=10, y=240)
        self.BotAddrText.place(x=20, y=260)
        self.BotAddrAleat.place(x=150, y=260)
        self.BotAddrSaldo.place(x=285, y=260)
        self.BotAddrGuardar.place(x=388, y=260)
        self.BotInicializar.place(x=492, y=260)
        self.BotSalir.place(x=595, y=260)
        self.raiz.mainloop()
    def crear_addr_aleat(self):
        """Generate a random private key and show its address, key and WIF."""
        priv = random_key()
        pub = privtopub(priv)
        addr = pubtoaddr(pub)
        wif = encode_privkey(priv, 'wif')
        self.msgAddr.set(addr)
        self.msgPriv.set(priv)
        self.msgWif.set(wif)
    def crear_addr_text(self):
        """Derive a key pair from SHA-256 of the text field (brain wallet).

        SECURITY NOTE: keys derived from short human-chosen text are
        trivially brute-forced; use only for experimentation.
        """
        priv = sha256(self.msgText.get())
        pub = privtopub(priv)
        addr = pubtoaddr(pub)
        wif = encode_privkey(priv, 'wif')
        self.msgAddr.set(addr)
        self.msgPriv.set(priv)
        self.msgWif.set(wif)
    def guardar(self):
        """Save WIF/private key/address/balance to '<address>.dat'.

        Does nothing when no address has been generated yet.
        """
        if self.msgAddr.get() != '':
            # BUG FIX: context manager closes the file even if a write
            # raises (the original open()/close() pair leaked on error).
            with open(self.msgAddr.get() + '.dat', 'w') as f:
                f.write('WIF: ' + self.msgWif.get() + '\n')
                f.write('PRIV: ' + self.msgPriv.get() + '\n')
                f.write('ADDR: ' + self.msgAddr.get() + '\n')
                f.write('SALDO: ' + str(self.msgSaldo.get()))
    def b1(self):
        """Fetch balance and total-received from blockr.io for the address.

        On any network/parsing failure both fields are set to -1 as an
        error marker; Ctrl-C exits the program.
        """
        try:
            request = 'http://btc.blockr.io/api/v1/address/info/' + self.msgAddr.get()
            response = requests.get(request, timeout=10)
            content = response.json()
            content1 = content['data'] ['balance']
            content2 = content['data'] ['totalreceived']
            self.msgSaldo.set(content1)
            self.msgTotRec.set(content2)
        except KeyboardInterrupt:
            exit()
        except Exception:
            self.msgSaldo.set(-1)
            self.msgTotRec.set(-1)
    def inicializar(self):
        """Reset all fields to their initial empty/zero state."""
        self.msgText.set('')
        self.msgAddr.set('')
        self.msgPriv.set('')
        self.msgWif.set('')
        self.msgSaldo.set(0.0)
def main():
    """Start the GUI; returns 0 once the Tk main loop ends."""
    app = Aplicacion()  # blocks inside Tk's mainloop until the window closes
    return 0
# Run the GUI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| true | true |
1c32c2c431f709326d664e14c7f2c6b0b119b490 | 1,031 | py | Python | detect_mask_and_ocr.py | isomorphicdude/Highlighted-Text-OCR | 07557bf77172b5e411f83352d8c4a6ba1b46fe6a | [
"MIT"
] | null | null | null | detect_mask_and_ocr.py | isomorphicdude/Highlighted-Text-OCR | 07557bf77172b5e411f83352d8c4a6ba1b46fe6a | [
"MIT"
] | null | null | null | detect_mask_and_ocr.py | isomorphicdude/Highlighted-Text-OCR | 07557bf77172b5e411f83352d8c4a6ba1b46fe6a | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import torch
import easyocr
# Reading the image
img = cv2.imread('image.jpg')
#define kernel size for the morphological clean-up
kernel = np.ones((7,7),np.uint8)
# convert to hsv colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# lower bound and upper bound for Green color
# lower_bound = np.array([50, 20, 20])
# upper_bound = np.array([100, 255, 255])
# lower bound and upper bound for Yellow color
lower_bound = np.array([20, 80, 80])
upper_bound = np.array([30, 255, 255])
# find the colors within the boundaries
mask = cv2.inRange(hsv, lower_bound, upper_bound)
# Remove unnecessary noise from mask
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# Segment only the detected region
segmented_img = cv2.bitwise_and(img, img, mask=mask)
output = cv2.resize(segmented_img, (960, 540))
# BUG FIX: the filename must carry an image extension — cv2.imwrite infers
# the encoder from it, and the OCR call below reads 'modified.jpg', which
# was never created by the original "cv2.imwrite('modified', output)".
cv2.imwrite('modified.jpg',output)
reader = easyocr.Reader(['de', 'en'])
result = reader.readtext('modified.jpg') | 25.146341 | 55 | 0.694471 | import cv2
import numpy as np
import torch
import easyocr
# Segment yellow-highlighted regions of the page, then OCR them.
img = cv2.imread('image.jpg')
kernel = np.ones((7,7),np.uint8)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# HSV bounds for yellow highlighter
lower_bound = np.array([20, 80, 80])
upper_bound = np.array([30, 255, 255])
mask = cv2.inRange(hsv, lower_bound, upper_bound)
# close + open to remove speckle noise from the mask
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
segmented_img = cv2.bitwise_and(img, img, mask=mask)
output = cv2.resize(segmented_img, (960, 540))
# BUG FIX: add the '.jpg' extension — cv2.imwrite infers the encoder from
# it, and the readtext() call below expects 'modified.jpg' to exist.
cv2.imwrite('modified.jpg',output)
reader = easyocr.Reader(['de', 'en'])
result = reader.readtext('modified.jpg') | true | true |
1c32c3186b313f3e843531f74356fcc5f1147f9b | 45,071 | py | Python | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/bridge_domain.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 94 | 2018-04-30T20:29:15.000Z | 2022-03-29T13:40:31.000Z | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/bridge_domain.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 67 | 2018-12-06T21:08:09.000Z | 2022-03-29T18:00:46.000Z | pkgs/conf-pkg/src/genie/libs/conf/l2vpn/iosxr/bridge_domain.py | miott/genielibs | 6464642cdd67aa2367bdbb12561af4bb060e5e62 | [
"Apache-2.0"
] | 49 | 2018-06-29T18:59:03.000Z | 2022-03-10T02:07:59.000Z |
from abc import ABC
import warnings
import contextlib
from genie.conf.base.attributes import UnsupportedAttributeWarning,\
AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig
from genie.libs.conf.interface import BviInterface
from genie.libs.conf.l2vpn.pseudowire import PseudowireNeighbor,\
PseudowireIPv4Neighbor, PseudowireEviNeighbor
class BridgeDomain(ABC):
class DeviceAttributes(ABC):
class InterfaceAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
                 **kwargs):
    """Build the IOS-XR CLI for one bridge-domain attachment circuit.

    Returns the generated configuration (or its removal form when
    `unconfig` is True) as a string.  `apply` must be False here — the
    caller aggregates and applies the config — and no extra kwargs are
    accepted.
    """
    assert not apply
    assert not kwargs, kwargs
    attributes = AttributesHelper(self, attributes)
    configurations = CliConfigBuilder(unconfig=unconfig)
    # iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
    # iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1
    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 (config-l2vpn-bg-bd-ac)
    with configurations.submode_context(
            attributes.format(
                'routed interface {interface_name}' if isinstance(self.interface, BviInterface) else 'interface {interface_name}',
                force=True),
            exit_cmd='' if isinstance(self.interface, BviInterface) else 'exit',  # routed interface may not be an actual submode
            ):
        if unconfig and attributes.iswildcard:
            configurations.submode_unconfig()
        if isinstance(self.interface, BviInterface):
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1 / split-horizon group core
            v = attributes.value('split_horizon_group_core')
            if v is True:
                configurations.append_line('split-horizon group core')
            if configurations:
                # There are configurations... It must be a submode; exit.
                configurations.append_line('exit', raw=True)
            else:
                # There are no configurations... May not be be a submode; Don't exit.
                pass
        else:
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 none
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 snoop profile someword3
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection (config-l2vpn-bg-bd-ac-dai)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation (config-l2vpn-bg-bd-ac-dai-av)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4 disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / igmp snooping profile someword3
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard (config-l2vpn-bg-bd-ac-ipsg)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging disable
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
            # delegate the per-AC MAC sub-configuration to the mac namespace
            sub, attributes2 = attributes.namespace('mac')
            if sub is not None:
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mld snooping profile someword3
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / split-horizon group
            v = attributes.value('split_horizon_group')
            if v is True:
                configurations.append_line('split-horizon group')
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / static-mac-address aaaa.bbbb.cccc
            configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast kbps 64
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast pps 1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast kbps 64
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast pps 1
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast kbps 64
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast pps 1
    return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class MacAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
with configurations.submode_context('mac', cancel_empty=True):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging (config-l2vpn-bg-bd-ac-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / aging / type inactivity
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / learning disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit (config-l2vpn-bg-bd-ac-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / port-down flush
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / port-down flush disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure (config-l2vpn-bg-bd-ac-mac-secure)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac / secure / logging disable
pass
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
        class NeighborAttributes(ABC):
            """Per-pseudowire-neighbor configuration under a bridge domain.

            Emits the 'neighbor ...' header — a sub-mode for IPv4 pseudowire
            neighbors, a single line for EVPN neighbors — followed by the
            supported per-neighbor attributes (dhcp/igmp/mld snooping,
            static MPLS labels, pw-class, split-horizon, static MAC).
            """

            def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
                """Return the per-neighbor CLI as a string; never applies to a device."""
                assert not apply
                assert not kwargs, kwargs
                attributes = AttributesHelper(self, attributes)
                configurations = CliConfigBuilder(unconfig=unconfig)
                # Determine the neighbor header line and whether it opens a
                # sub-mode (EVPN neighbors are emitted as a single line).
                nbr_ctx = None
                nbr_is_submode = True
                if isinstance(self.neighbor, PseudowireIPv4Neighbor):
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
                    assert self.ip is not None
                    assert self.pw_id is not None
                    nbr_ctx = attributes.format('neighbor {ip} pw-id {pw_id}', force=True)
                elif isinstance(self.neighbor, PseudowireEviNeighbor):
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn 1 target 1
                    assert self.evi is not None
                    assert self.ac_id is not None
                    nbr_ctx = attributes.format('neighbor evpn {evi.evi_id} target {ac_id}', force=True)
                    nbr_is_submode = False
                else:
                    # Unrecognized neighbor type: fail loudly rather than
                    # emitting a partial/incorrect configuration.
                    raise ValueError(self.neighbor)
                if not nbr_is_submode:
                    configurations.append_line(nbr_ctx)
                else:
                    with configurations.submode_context(nbr_ctx):
                        if unconfig and attributes.iswildcard:
                            configurations.submode_unconfig()
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / backup neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw-backup)
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / backup neighbor 1.2.3.4 pw-id 1 / pw-class someword3
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 none
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / dhcp ipv4 snoop profile someword3
                        # False means an explicit 'dhcp ipv4 none'; any other
                        # (truthy) value is used as a snoop profile name.
                        v = attributes.value('dhcp_ipv4_snooping_profile')
                        if v is not None:
                            if v is False:
                                configurations.append_line('dhcp ipv4 none')
                            else:
                                configurations.append_line('dhcp ipv4 snoop profile {}'.format(v))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding unknown-unicast
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / flooding unknown-unicast disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / igmp snooping profile someword3
                        # False emits nothing (no per-neighbor disable form is
                        # modeled here, unlike the dhcp case above).
                        v = attributes.value('igmp_snooping_profile')
                        if v is not None:
                            if v is False:
                                pass
                            else:
                                configurations.append_line('igmp snooping profile {}'.format(v))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac (config-l2vpn-bg-bd-pw-mac)
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging (config-l2vpn-bg-bd-pw-mac-aging)
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / time 300
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / type absolute
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / aging / type inactivity
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / learning
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / learning disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit (config-l2vpn-bg-bd-pw-mac-limit)
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action flood
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action no-flood
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action none
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / action shutdown
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / maximum 1
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification both
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification none
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification syslog
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / limit / notification trap
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / port-down flush
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / port-down flush disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure (config-l2vpn-bg-bd-pw-mac-secure)
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action none
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action restrict
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / action shutdown
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / logging
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mac / secure / logging disable
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mld snooping profile someword3
                        # Same convention as igmp above: False emits nothing.
                        v = attributes.value('mld_snooping_profile')
                        if v is not None:
                            if v is False:
                                pass
                            else:
                                configurations.append_line('mld snooping profile {}'.format(v))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / mpls static label local 16 remote 16
                        # NOTE(review): this neighbor's own mpls_static_label is
                        # emitted as the *remote* label; the local label is read
                        # from the mirrored remote-neighbor attribute (labels
                        # are cross-referenced between the two pseudowire
                        # endpoints) — confirm this orientation is intended.
                        remote_label = attributes.value('mpls_static_label')
                        if remote_label is not None:
                            local_label = self.parent.neighbor_attr[self.remote_neighbor].mpls_static_label
                            if local_label is None:
                                # Only half of the label pair is configured;
                                # warn instead of emitting an invalid command.
                                warnings.warn(
                                    'remote neighbor {!r} mpls_static_label missing'.format(self.remote_neighbor),
                                    UnsupportedAttributeWarning)
                            else:
                                configurations.append_line('mpls static label local {} remote {}'.\
                                    format(local_label, remote_label))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / pw-class someword3
                        # pw_class is a conf object; emit its per-device name.
                        v = attributes.value('pw_class')
                        if v is not None:
                            configurations.append_line('pw-class {}'.\
                                format(v.device_attr[self.device].name))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / split-horizon group
                        if attributes.value('split_horizon'):
                            configurations.append_line('split-horizon group')
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / static-mac-address aaaa.bbbb.cccc
                        configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control broadcast kbps 64
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control broadcast pps 1
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control multicast kbps 64
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control multicast pps 1
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control unknown-unicast kbps 64
                        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 / storm-control unknown-unicast pps 1
                return str(configurations)

            def build_unconfig(self, apply=True, attributes=None, **kwargs):
                """Generate removal CLI by re-running build_config with unconfig set."""
                return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class EviAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / evi 1 (config-l2vpn-bg-bd-evi)
with configurations.submode_context(
attributes.format('evi {evi_id}', force=True),
exit_cmd=''): # evi is not a sub-mode in all releases.
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class VniAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / member vni 1 (config-l2vpn-bg-bd-vni)
with configurations.submode_context(attributes.format('member vni {vni_id}', force=True)):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class MacAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac (config-l2vpn-bg-bd-mac)
with configurations.submode_context('mac', cancel_empty=True):
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging (config-l2vpn-bg-bd-mac-aging)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / time 300
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / type absolute
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / aging / type inactivity
with configurations.submode_context('aging',cancel_empty=True):
configurations.append_line(attributes.format('time {aging_time}'))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / learning
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / learning disable
v = attributes.value('learning_disable')
if v is True:
configurations.append_line('learning disable')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit (config-l2vpn-bg-bd-mac-limit)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action no-flood
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / maximum 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification both
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification syslog
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / limit / notification trap
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / port-down flush
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / port-down flush disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure (config-l2vpn-bg-bd-mac-secure)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action restrict
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / action shutdown
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / secure / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / static-address aaaa.bbbb.cccc drop
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw access-pw disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw optimize
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw relay
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac / withdraw state-down
pass
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
        def build_config(self, apply=True, attributes=None, unconfig=False,
                         contained=False, **kwargs):
            """Build the IOS-XR CLI for this bridge domain on one device.

            Opens 'l2vpn' / 'bridge group <g>' / 'bridge-domain <bd>' and
            delegates each evi / interface / mac / vni / neighbor / vfi
            sub-section to its attribute sub-builder, preserving the
            sub-section emission order.

            Args:
                apply: when True, push the generated CLI to self.device;
                    when False, return it as a CliConfig object.
                attributes: optional attribute filter (wrapped in an
                    AttributesHelper).
                unconfig: generate removal configuration instead.
                contained: when True, the caller has already opened the
                    top-level 'l2vpn' submode, so it is not emitted here.

            Returns:
                None when apply is True, otherwise a CliConfig.
            """
            assert not kwargs, kwargs
            attributes = AttributesHelper(self, attributes)
            configurations = CliConfigBuilder(unconfig=unconfig)
            # iosxr: l2vpn (config-l2vpn)
            # ExitStack keeps the optional 'l2vpn' submode open across the
            # rest of the method; closed explicitly before apply/return.
            submode_stack = contextlib.ExitStack()
            if not contained:
                submode_stack.enter_context(
                    configurations.submode_context('l2vpn'))
            # iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
            # NOTE(review): cancel_empty=True is passed to attributes.format()
            # here rather than to submode_context() — looks misplaced; verify
            # whether the empty 'bridge group' submode should be cancelled.
            with configurations.submode_context(attributes.format('bridge group {group_name}', force=True, cancel_empty=True)):
                # iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
                with configurations.submode_context(attributes.format('bridge-domain {name}', force=True)):
                    if unconfig and attributes.iswildcard:
                        configurations.submode_unconfig()
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / coupled-mode
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dhcp ipv4 snoop profile someword3
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection (config-l2vpn-bg-bd-dai)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation (config-l2vpn-bg-bd-dai-av)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / dst-mac
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / ipv4
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / address-validation / src-mac
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / dynamic-arp-inspection / logging
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / evi 1 (config-l2vpn-bg-bd-evi)
                    for sub, attributes2 in attributes.mapping_values('evi_attr', keys=self.evis, sort=True):
                        configurations.append_block(
                            sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / flooding disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / flooding unknown-unicast disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / igmp snooping disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / igmp snooping profile someword3
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / routed interface BVI1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 (config-l2vpn-bg-bd-ac)
                    for sub, attributes2 in attributes.mapping_values('interface_attr', keys=self.interfaces, sort=True):
                        configurations.append_block(
                            sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / ip-source-guard (config-l2vpn-bg-bd-ipsg)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / ip-source-guard / logging
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mac (config-l2vpn-bg-bd-mac)
                    ns, attributes2 = attributes.namespace('mac')
                    if ns is not None:
                        configurations.append_block(
                            ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / member vni 1 (config-l2vpn-bg-bd-vni)
                    for sub, attributes2 in attributes.mapping_values('vni_attr', keys=self.vnis, sort=True):
                        configurations.append_block(
                            sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mld snooping profile someword3
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / mtu 100
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
                    for sub, attributes2 in attributes.mapping_values('neighbor_attr', keys=self.pseudowire_neighbors, sort=True):
                        configurations.append_block(
                            sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn evi 1 target 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / neighbor evpn evi 1 target 1 source 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / nv satellite (config-l2vpn-bg-bd-nv)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / nv satellite / offload ipv4 multicast enable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core (config-l2vpn-bg-bd-pbb-core)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / evi 1 (config-l2vpn-bg-bd-pbb-core-evi)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac (config-l2vpn-bg-bd-pbb-core-mac)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging (config-l2vpn-bg-bd-pbb-core-mac-aging)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / time 300
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / type absolute
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / aging / type inactivity
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / learning
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mac / learning disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / mmrp-flood-optimization
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb core / rewrite ingress tag push dot1ad 1 symmetric
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 (config-l2vpn-bg-bd-pbb-edge)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / dhcp ipv4 none
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / dhcp ipv4 snoop profile someword4
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / igmp snooping profile someword4
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac (config-l2vpn-bg-bd-pbb-edge-mac)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging (config-l2vpn-bg-bd-pbb-edge-mac-aging)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / time 300
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / type absolute
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / aging / type inactivity
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / learning
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / learning disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit (config-l2vpn-bg-bd-pbb-edge-mac-limit)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action flood
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action no-flood
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action none
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / action shutdown
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / maximum 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification both
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification none
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification syslog
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / limit / notification trap
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure (config-l2vpn-bg-bd-pbb-edge-mac-sec)
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / accept-shutdown
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action none
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action restrict
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / action shutdown
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / logging
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / mac / secure / logging disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / split-horizon group vfi disable
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / static-mac-address aaaa.bbbb.cccc bmac aaaa.bbbb.cccc
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / pbb edge i-sid 256 core-bridge someword3 / unknown-unicast-bmac aaaa.bbbb.cccc
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / shutdown
                    if attributes.value('shutdown'):
                        configurations.append_line('shutdown')
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control broadcast kbps 64
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control broadcast pps 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control multicast kbps 64
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control multicast pps 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control unknown-unicast kbps 64
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / storm-control unknown-unicast pps 1
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / transport-mode vlan passthrough
                    # iosxr: l2vpn / bridge group someword / bridge-domain someword2 / vfi someword3 (config-l2vpn-bg-bd-vfi)
                    for vfi, attributes2 in attributes.sequence_values('vfis'):
                        configurations.append_block(
                            str(vfi.build_config(apply=False, attributes=attributes2, unconfig=unconfig)))
            # Close the optional 'l2vpn' submode before emitting/returning.
            submode_stack.close()
            if apply:
                if configurations:
                    self.device.configure(str(configurations), fail_invalid=True)
            else:
                return CliConfig(device=self.device, unconfig=unconfig,
                                 cli_config=configurations, fail_invalid=True)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
| 85.039623 | 191 | 0.609305 |
from abc import ABC
import warnings
import contextlib
from genie.conf.base.attributes import UnsupportedAttributeWarning,\
AttributesHelper
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.config import CliConfig
from genie.libs.conf.interface import BviInterface
from genie.libs.conf.l2vpn.pseudowire import PseudowireNeighbor,\
PseudowireIPv4Neighbor, PseudowireEviNeighbor
class BridgeDomain(ABC):
class DeviceAttributes(ABC):
class InterfaceAttributes(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
assert not apply
assert not kwargs, kwargs
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
with configurations.submode_context(
attributes.format(
'routed interface {interface_name}' if isinstance(self.interface, BviInterface) else 'interface {interface_name}',
force=True),
exit_cmd='' if isinstance(self.interface, BviInterface) else 'exit',
):
if unconfig and attributes.iswildcard:
configurations.submode_unconfig()
if isinstance(self.interface, BviInterface):
v = attributes.value('split_horizon_group_core')
if v is True:
configurations.append_line('split-horizon group core')
if configurations:
configurations.append_line('exit', raw=True)
else:
pass
else:
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 none
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dhcp ipv4 snoop profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection (config-l2vpn-bg-bd-ac-dai)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation (config-l2vpn-bg-bd-ac-dai-av)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / dst-mac disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / ipv4 disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / address-validation / src-mac disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / dynamic-arp-inspection / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / flooding unknown-unicast disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / igmp snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard (config-l2vpn-bg-bd-ac-ipsg)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / ip-source-guard / logging disable
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
sub, attributes2 = attributes.namespace('mac')
if sub is not None:
configurations.append_block(
sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mac (config-l2vpn-bg-bd-ac-mac)
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / mld snooping profile someword3
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / split-horizon group
v = attributes.value('split_horizon_group')
if v is True:
configurations.append_line('split-horizon group')
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / static-mac-address aaaa.bbbb.cccc
configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control broadcast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control multicast pps 1
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast kbps 64
# iosxr: l2vpn / bridge group someword / bridge-domain someword2 / interface Bundle-Ether1 / storm-control unknown-unicast pps 1
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
    """Remove this configuration.

    Delegates to ``build_config`` with ``unconfig=True`` so the exact
    same CLI tree is rendered in its ``no``-form.
    """
    return self.build_config(
        apply=apply,
        attributes=attributes,
        unconfig=True,
        **kwargs)
class MacAttributes(ABC):
    """MAC sub-attributes of an attachment circuit
    (IOS-XR submode ``config-l2vpn-bg-bd-ac-mac``)."""

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        """Render (never apply) the ``mac`` submode CLI for this AC.

        ``apply`` must be False: this inner builder only returns text for
        the parent builder to assemble.
        """
        assert not apply
        assert not kwargs, kwargs
        helper = AttributesHelper(self, attributes)
        builder = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group <g> / bridge-domain <bd>
        #        / interface <intf> / mac (config-l2vpn-bg-bd-ac-mac)
        # cancel_empty=True: the bare 'mac' submode is dropped when no
        # sub-command is emitted (currently always, see below).
        with builder.submode_context('mac', cancel_empty=True):
            # Device CLI exists but is not modeled yet:
            #   aging time/type {absolute|inactivity}
            #   learning [disable]
            #   limit action {flood|no-flood|none|shutdown} / maximum /
            #         notification {both|none|syslog|trap}
            #   port-down flush [disable]
            #   secure action {none|restrict|shutdown} / disable /
            #          logging [disable]
            pass

        return str(builder)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        """Render the ``no``-form of :meth:`build_config`."""
        return self.build_config(apply=apply, attributes=attributes,
                                 unconfig=True, **kwargs)
class NeighborAttributes(ABC):
    """Per-pseudowire-neighbor attributes of a bridge domain.

    Renders either the ``neighbor <ip> pw-id <n>`` submode
    (config-l2vpn-bg-bd-pw) or the single-line
    ``neighbor evpn <evi> target <ac>`` form.
    """

    def build_config(self, apply=True, attributes=None, unconfig=False, **kwargs):
        """Build (never apply) the CLI block for this neighbor.

        Parameters
        ----------
        apply : bool
            Must be False; this inner builder only returns text.
        attributes
            Attribute selection, wrapped in an AttributesHelper.
        unconfig : bool
            When True, emit the ``no``-form of the configuration.

        Returns
        -------
        str
            The rendered CLI block.

        Raises
        ------
        ValueError
            If ``self.neighbor`` is neither an IPv4 nor an EVPN
            pseudowire neighbor.
        """
        assert not apply
        assert not kwargs, kwargs
        attributes = AttributesHelper(self, attributes)
        configurations = CliConfigBuilder(unconfig=unconfig)

        # Determine the neighbor context line.  IPv4 pseudowire neighbors
        # open a CLI submode; EVPN neighbors are a single line.
        nbr_ctx = None
        nbr_is_submode = True
        if isinstance(self.neighbor, PseudowireIPv4Neighbor):
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2
            #        / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
            assert self.ip is not None
            assert self.pw_id is not None
            nbr_ctx = attributes.format('neighbor {ip} pw-id {pw_id}', force=True)
        elif isinstance(self.neighbor, PseudowireEviNeighbor):
            # iosxr: l2vpn / bridge group someword / bridge-domain someword2
            #        / neighbor evpn 1 target 1
            assert self.evi is not None
            assert self.ac_id is not None
            nbr_ctx = attributes.format('neighbor evpn {evi.evi_id} target {ac_id}', force=True)
            nbr_is_submode = False
        else:
            raise ValueError(self.neighbor)
        if not nbr_is_submode:
            configurations.append_line(nbr_ctx)
        else:
            with configurations.submode_context(nbr_ctx):
                if unconfig and attributes.iswildcard:
                    configurations.submode_unconfig()
                # iosxr: ... / neighbor 1.2.3.4 pw-id 1 / backup neighbor ...
                #        (not modeled)
                # iosxr: ... / dhcp ipv4 none
                # iosxr: ... / dhcp ipv4 snoop profile someword3
                # dhcp_ipv4_snooping_profile: False means explicit
                # 'dhcp ipv4 none'; a string is the snoop profile name.
                v = attributes.value('dhcp_ipv4_snooping_profile')
                if v is not None:
                    if v is False:
                        configurations.append_line('dhcp ipv4 none')
                    else:
                        configurations.append_line('dhcp ipv4 snoop profile {}'.format(v))
                # iosxr: ... / flooding [unknown-unicast] [disable]
                #        (not modeled)
                # iosxr: ... / igmp snooping profile someword3
                # igmp_snooping_profile: False means "do not configure"
                # (no explicit 'none' form exists, unlike dhcp above).
                v = attributes.value('igmp_snooping_profile')
                if v is not None:
                    if v is False:
                        pass
                    else:
                        configurations.append_line('igmp snooping profile {}'.format(v))
                # iosxr: ... / mac (config-l2vpn-bg-bd-pw-mac) and its
                # aging/learning/limit/port-down/secure sub-commands are
                # not modeled for pseudowire neighbors.
                # iosxr: ... / mld snooping profile someword3
                v = attributes.value('mld_snooping_profile')
                if v is not None:
                    if v is False:
                        pass
                    else:
                        configurations.append_line('mld snooping profile {}'.format(v))
                # iosxr: ... / mpls static label local 16 remote 16
                # NOTE(review): this neighbor's own mpls_static_label is
                # emitted as the *remote* label while the peer neighbor's
                # value is emitted as *local* -- presumably because labels
                # are configured from the peer's perspective; confirm
                # against the pseudowire model.
                remote_label = attributes.value('mpls_static_label')
                if remote_label is not None:
                    local_label = self.parent.neighbor_attr[self.remote_neighbor].mpls_static_label
                    if local_label is None:
                        # Both directions are required; warn and skip
                        # rather than emit a half-configured static label.
                        warnings.warn(
                            'remote neighbor {!r} mpls_static_label missing'.format(self.remote_neighbor),
                            UnsupportedAttributeWarning)
                    else:
                        configurations.append_line(
                            'mpls static label local {} remote {}'.format(
                                local_label, remote_label))
                # iosxr: ... / pw-class someword3
                v = attributes.value('pw_class')
                if v is not None:
                    # pw_class is a PseudowireClass object; use its
                    # per-device name.
                    configurations.append_line('pw-class {}'.format(
                        v.device_attr[self.device].name))
                # iosxr: ... / split-horizon group
                if attributes.value('split_horizon'):
                    configurations.append_line('split-horizon group')
                # iosxr: ... / static-mac-address aaaa.bbbb.cccc
                configurations.append_line(attributes.format('static-mac-address {static_mac_address}'))
                # iosxr: ... / storm-control {broadcast|multicast|
                #        unknown-unicast} {kbps|pps} <n>  (not modeled)
        return str(configurations)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        """Render the ``no``-form of :meth:`build_config`."""
        return self.build_config(apply=apply, attributes=attributes, unconfig=True, **kwargs)
class EviAttributes(ABC):
    """EVI sub-attributes of a bridge domain
    (IOS-XR submode ``config-l2vpn-bg-bd-evi``)."""

    def build_config(self, apply=True, attributes=None, unconfig=False,
                     **kwargs):
        """Render (never apply) the ``evi <id>`` CLI line for this EVI."""
        assert not apply
        assert not kwargs, kwargs
        helper = AttributesHelper(self, attributes)
        builder = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group <g> / bridge-domain <bd>
        #        / evi 1 (config-l2vpn-bg-bd-evi)
        # evi is not a sub-mode in all releases, hence the empty exit_cmd.
        evi_line = helper.format('evi {evi_id}', force=True)
        with builder.submode_context(evi_line, exit_cmd=''):
            if unconfig and helper.iswildcard:
                builder.submode_unconfig()

        return str(builder)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        """Render the ``no``-form of :meth:`build_config`."""
        return self.build_config(apply=apply, attributes=attributes,
                                 unconfig=True, **kwargs)
class VniAttributes(ABC):
    """VNI member sub-attributes of a bridge domain
    (IOS-XR submode ``config-l2vpn-bg-bd-vni``)."""

    def build_config(self, apply=True, attributes=None, unconfig=False,
                     **kwargs):
        """Render (never apply) the ``member vni <id>`` CLI for this VNI."""
        assert not apply
        assert not kwargs, kwargs
        helper = AttributesHelper(self, attributes)
        builder = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group <g> / bridge-domain <bd>
        #        / member vni 1 (config-l2vpn-bg-bd-vni)
        vni_line = helper.format('member vni {vni_id}', force=True)
        with builder.submode_context(vni_line):
            if unconfig and helper.iswildcard:
                builder.submode_unconfig()

        return str(builder)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        """Render the ``no``-form of :meth:`build_config`."""
        return self.build_config(apply=apply, attributes=attributes,
                                 unconfig=True, **kwargs)
class MacAttributes(ABC):
    """MAC sub-attributes of a bridge domain
    (IOS-XR submode ``config-l2vpn-bg-bd-mac``)."""

    def build_config(self, apply=True, attributes=None, unconfig=False,
                     **kwargs):
        """Render (never apply) the ``mac`` submode CLI for the domain."""
        assert not apply
        assert not kwargs, kwargs
        helper = AttributesHelper(self, attributes)
        builder = CliConfigBuilder(unconfig=unconfig)

        # iosxr: l2vpn / bridge group <g> / bridge-domain <bd>
        #        / mac (config-l2vpn-bg-bd-mac)
        # cancel_empty=True drops the bare 'mac' submode when nothing
        # inside it is configured.
        with builder.submode_context('mac', cancel_empty=True):
            # iosxr: ... / mac / aging / time 300
            # (aging type absolute|inactivity is not modeled)
            with builder.submode_context('aging', cancel_empty=True):
                builder.append_line(helper.format('time {aging_time}'))
            # iosxr: ... / mac / learning [disable]
            if helper.value('learning_disable') is True:
                builder.append_line('learning disable')
            # Device CLI exists but is not modeled yet:
            #   limit action/maximum/notification, port-down flush
            #   [disable], secure action/disable/logging,
            #   static-address <mac> drop, withdraw {access-pw disable|
            #   disable|optimize|relay|state-down}

        return str(builder)

    def build_unconfig(self, apply=True, attributes=None, **kwargs):
        """Render the ``no``-form of :meth:`build_config`."""
        return self.build_config(apply=apply, attributes=attributes,
                                 unconfig=True, **kwargs)
def build_config(self, apply=True, attributes=None, unconfig=False,
                 contained=False, **kwargs):
    """Build the bridge-domain configuration and optionally apply it.

    Parameters
    ----------
    apply : bool
        When True, push the rendered CLI to ``self.device``;
        when False, return a CliConfig object instead.
    attributes
        Attribute selection, wrapped in an AttributesHelper.
    unconfig : bool
        When True, emit the ``no``-form of the configuration.
    contained : bool
        When True, skip the outer ``l2vpn`` submode (the caller is
        already inside it).

    Returns
    -------
    CliConfig or None
        CliConfig when ``apply`` is False; otherwise None (the
        configuration is applied as a side effect).
    """
    assert not kwargs, kwargs
    attributes = AttributesHelper(self, attributes)
    configurations = CliConfigBuilder(unconfig=unconfig)

    # iosxr: l2vpn (config-l2vpn)
    # ExitStack lets the 'l2vpn' submode be entered conditionally while
    # still being closed deterministically below.
    submode_stack = contextlib.ExitStack()
    if not contained:
        submode_stack.enter_context(
            configurations.submode_context('l2vpn'))

    # iosxr: l2vpn / bridge group someword (config-l2vpn-bg)
    with configurations.submode_context(attributes.format('bridge group {group_name}', force=True, cancel_empty=True)):
        # iosxr: l2vpn / bridge group someword / bridge-domain someword2 (config-l2vpn-bg-bd)
        with configurations.submode_context(attributes.format('bridge-domain {name}', force=True)):
            if unconfig and attributes.iswildcard:
                configurations.submode_unconfig()
            # Not modeled: coupled-mode, dhcp ipv4 snoop profile,
            # dynamic-arp-inspection (+ address-validation/logging).
            # iosxr: ... / evi 1 (config-l2vpn-bg-bd-evi)
            for sub, attributes2 in attributes.mapping_values('evi_attr', keys=self.evis, sort=True):
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # Not modeled: flooding [unknown-unicast] disable,
            # igmp snooping disable/profile, routed interface BVI1.
            # iosxr: ... / interface Bundle-Ether1 (config-l2vpn-bg-bd-ac)
            for sub, attributes2 in attributes.mapping_values('interface_attr', keys=self.interfaces, sort=True):
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # Not modeled: ip-source-guard (+ logging).
            # iosxr: ... / mac (config-l2vpn-bg-bd-mac)
            ns, attributes2 = attributes.namespace('mac')
            if ns is not None:
                configurations.append_block(
                    ns.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # iosxr: ... / member vni 1 (config-l2vpn-bg-bd-vni)
            for sub, attributes2 in attributes.mapping_values('vni_attr', keys=self.vnis, sort=True):
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # Not modeled: mld snooping profile, mtu.
            # iosxr: ... / neighbor 1.2.3.4 pw-id 1 (config-l2vpn-bg-bd-pw)
            for sub, attributes2 in attributes.mapping_values('neighbor_attr', keys=self.pseudowire_neighbors, sort=True):
                configurations.append_block(
                    sub.build_config(apply=False, attributes=attributes2, unconfig=unconfig))
            # Not modeled: neighbor evpn evi ... target ... [source ...],
            # nv satellite (+ offload ipv4 multicast enable),
            # pbb core subtree (evi, mac aging/learning,
            # mmrp-flood-optimization, rewrite ingress tag push),
            # pbb edge i-sid subtree (dhcp, igmp, mac aging/learning/
            # limit/secure, split-horizon group vfi disable,
            # static-mac-address ... bmac ..., unknown-unicast-bmac).
            # iosxr: ... / shutdown
            if attributes.value('shutdown'):
                configurations.append_line('shutdown')
            # Not modeled: storm-control {broadcast|multicast|
            # unknown-unicast} {kbps|pps}, transport-mode vlan passthrough.
            # iosxr: ... / vfi someword3 (config-l2vpn-bg-bd-vfi)
            for vfi, attributes2 in attributes.sequence_values('vfis'):
                # NOTE(review): only this append_block wraps the result in
                # str() -- presumably harmless since append_block accepts
                # text, but inconsistent with the calls above; confirm.
                configurations.append_block(
                    str(vfi.build_config(apply=False, attributes=attributes2, unconfig=unconfig)))
    submode_stack.close()
    if apply:
        if configurations:
            self.device.configure(str(configurations), fail_invalid=True)
    else:
        return CliConfig(device=self.device, unconfig=unconfig,
                         cli_config=configurations, fail_invalid=True)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
    """Remove this bridge-domain configuration.

    Delegates to ``build_config`` with ``unconfig=True`` so the exact
    same CLI tree is rendered in its ``no``-form.
    """
    return self.build_config(
        apply=apply,
        attributes=attributes,
        unconfig=True,
        **kwargs)
| true | true |
1c32c430911cda1f536110a33edb83aad3b2f3d6 | 64,678 | py | Python | python/pyspark/pandas/internal.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 7 | 2015-02-05T10:57:54.000Z | 2022-01-12T08:52:54.000Z | python/pyspark/pandas/internal.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 27 | 2021-04-14T16:44:15.000Z | 2022-03-13T00:35:17.000Z | python/pyspark/pandas/internal.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2022-01-24T20:07:59.000Z | 2022-01-25T16:11:34.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An internal immutable DataFrame with some metadata to manage indexes.
"""
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype # noqa: F401
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, Window
from pyspark.sql.types import ( # noqa: F401
BooleanType,
DataType,
LongType,
StructField,
StructType,
StringType,
)
from pyspark.sql.utils import is_timestamp_ntz_preferred
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps
from pyspark.pandas._typing import Label
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.series import Series
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
)
# A function to turn given numbers to Spark columns that represent pandas-on-Spark index.
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
# The Spark column name for the single default index level: "__index_level_0__".
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# A pattern to check if the name of a Spark column is a pandas-on-Spark index name or not.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
# Internal column filled with F.monotonically_increasing_id() to preserve the natural row order.
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
# Internally-managed columns that must never be exposed as data columns.
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
# Default name for an unnamed Series, and its string form used as the Spark column name.
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
class InternalField:
    """
    Internal per-column metadata: the pandas dtype of a column plus, optionally,
    the Spark ``StructField`` backing it.

    Parameters
    ----------
    dtype : numpy.dtype or pandas' ExtensionDtype
        The dtype for the field.
    struct_field : StructField, optional
        The `StructField` for the field. If None, InternalFrame will properly set.
    """

    def __init__(self, dtype: Dtype, struct_field: Optional[StructField] = None):
        self._dtype = dtype
        self._struct_field = struct_field

    @staticmethod
    def from_struct_field(
        struct_field: StructField, *, use_extension_dtypes: bool = False
    ) -> "InternalField":
        """
        Build an InternalField from the given StructField, inferring the pandas
        dtype from the StructField's Spark data type.

        Parameters
        ----------
        struct_field : StructField
            The StructField used to create a new InternalField object.
        use_extension_dtypes : bool
            If True, try to use the extension dtypes.

        Returns
        -------
        InternalField
        """
        inferred_dtype = spark_type_to_pandas_dtype(
            struct_field.dataType, use_extension_dtypes=use_extension_dtypes
        )
        return InternalField(dtype=inferred_dtype, struct_field=struct_field)

    @property
    def dtype(self) -> Dtype:
        """The dtype for the field."""
        return self._dtype

    @property
    def struct_field(self) -> Optional[StructField]:
        """The StructField for the field, or None if it has not been set yet."""
        return self._struct_field

    @property
    def name(self) -> str:
        """The field name; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.name

    @property
    def spark_type(self) -> DataType:
        """The Spark data type for the field; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.dataType

    @property
    def nullable(self) -> bool:
        """The nullability for the field; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.nullable

    @property
    def metadata(self) -> Dict[str, Any]:
        """The metadata for the field; the StructField must be set."""
        assert self.struct_field is not None
        return self.struct_field.metadata

    @property
    def is_extension_dtype(self) -> bool:
        """Whether the dtype is one of pandas' extension dtypes."""
        return isinstance(self.dtype, extension_dtypes)

    def normalize_spark_type(self) -> "InternalField":
        """Return a copy whose Spark type is made nullable with normalized decimal precision."""
        assert self.struct_field is not None
        normalized = force_decimal_precision_scale(as_nullable_spark_type(self.spark_type))
        return self.copy(spark_type=normalized, nullable=True)

    def copy(
        self,
        *,
        name: Union[str, _NoValueType] = _NoValue,
        dtype: Union[Dtype, _NoValueType] = _NoValue,
        spark_type: Union[DataType, _NoValueType] = _NoValue,
        nullable: Union[bool, _NoValueType] = _NoValue,
        metadata: Union[Optional[Dict[str, Any]], _NoValueType] = _NoValue,
    ) -> "InternalField":
        """Return a copy of this field, overriding only the attributes explicitly given."""
        # Each current value is looked up lazily (only when the argument was left
        # as _NoValue), so explicitly-given attributes never touch self.struct_field.
        name = self.name if name is _NoValue else name
        dtype = self.dtype if dtype is _NoValue else dtype
        spark_type = self.spark_type if spark_type is _NoValue else spark_type
        nullable = self.nullable if nullable is _NoValue else nullable
        metadata = self.metadata if metadata is _NoValue else metadata
        return InternalField(
            dtype=cast(Dtype, dtype),
            struct_field=StructField(
                name=cast(str, name),
                dataType=cast(DataType, spark_type),
                nullable=cast(bool, nullable),
                metadata=cast(Optional[Dict[str, Any]], metadata),
            ),
        )

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, InternalField):
            return False
        return self.dtype == other.dtype and self.struct_field == other.struct_field

    def __repr__(self) -> str:
        return "InternalField(dtype={dtype},struct_field={struct_field})".format(
            dtype=self.dtype, struct_field=self.struct_field
        )
class InternalFrame:
"""
The internal immutable DataFrame which manages Spark DataFrame and column names and index
information.
.. note:: this is an internal class. It is not supposed to be exposed to users and users
should not directly access to it.
The internal immutable DataFrame represents the index information for a DataFrame it belongs to.
For instance, if we have a pandas-on-Spark DataFrame as below, pandas DataFrame does not
store the index as columns.
>>> psdf = ps.DataFrame({
... 'A': [1, 2, 3, 4],
... 'B': [5, 6, 7, 8],
... 'C': [9, 10, 11, 12],
... 'D': [13, 14, 15, 16],
... 'E': [17, 18, 19, 20]}, columns = ['A', 'B', 'C', 'D', 'E'])
>>> psdf # doctest: +NORMALIZE_WHITESPACE
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
However, all columns including index column are also stored in Spark DataFrame internally
as below.
>>> psdf._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
In order to fill this gap, the current metadata is used by mapping Spark's internal column
to pandas-on-Spark's index. See the method below:
* `spark_frame` represents the internal Spark DataFrame
* `data_spark_column_names` represents non-indexing Spark column names
* `data_spark_columns` represents non-indexing Spark columns
* `data_fields` represents non-indexing InternalFields
* `index_spark_column_names` represents internal index Spark column names
* `index_spark_columns` represents internal index Spark columns
* `index_fields` represents index InternalFields
* `spark_column_names` represents all columns
* `index_names` represents the external index name as a label
* `to_internal_spark_frame` represents Spark DataFrame derived by the metadata. Includes index.
* `to_pandas_frame` represents pandas DataFrame derived by the metadata
>>> internal = psdf._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame
A B C D E
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
In case that index is set to one of the existing column as below:
>>> psdf1 = psdf.set_index("A")
>>> psdf1 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
>>> psdf1._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal = psdf1._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+---+---+
| A| B| C| D| E|
+---+---+---+---+---+
| 1| 5| 9| 13| 17|
| 2| 6| 10| 14| 18|
| 3| 7| 11| 15| 19|
| 4| 8| 12| 16| 20|
+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
1 5 9 13 17
2 6 10 14 18
3 7 11 15 19
4 8 12 16 20
In case that index becomes a multi index as below:
>>> psdf2 = psdf.set_index("A", append=True)
>>> psdf2 # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
>>> psdf2._internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal = psdf2._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B', 'C', 'D', 'E']
>>> internal.index_spark_column_names
['__index_level_0__', 'A']
>>> internal.spark_column_names
['__index_level_0__', 'A', 'B', 'C', 'D', 'E']
>>> internal.index_names
[None, ('A',)]
>>> internal.data_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(C,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(D,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(E,LongType,false))]
>>> internal.index_fields # doctest: +NORMALIZE_WHITESPACE
[InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false)),
InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+-----------------+---+---+---+---+---+
|__index_level_0__| A| B| C| D| E|
+-----------------+---+---+---+---+---+
| 0| 1| 5| 9| 13| 17|
| 1| 2| 6| 10| 14| 18|
| 2| 3| 7| 11| 15| 19|
| 3| 4| 8| 12| 16| 20|
+-----------------+---+---+---+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B C D E
A
0 1 5 9 13 17
1 2 6 10 14 18
2 3 7 11 15 19
3 4 8 12 16 20
For multi-level columns, it also holds column_labels
>>> columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'),
... ('Y', 'C'), ('Y', 'D')])
>>> psdf3 = ps.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16],
... [17, 18, 19, 20]], columns = columns)
>>> psdf3 # doctest: +NORMALIZE_WHITESPACE
X Y
A B C D
0 1 2 3 4
1 5 6 7 8
2 9 10 11 12
3 13 14 15 16
4 17 18 19 20
>>> internal = psdf3._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+------+------+------+------+-----------------+
|__index_level_0__|(X, A)|(X, B)|(Y, C)|(Y, D)|__natural_order__|
+-----------------+------+------+------+------+-----------------+
| 0| 1| 2| 3| 4| ...|
| 1| 5| 6| 7| 8| ...|
| 2| 9| 10| 11| 12| ...|
| 3| 13| 14| 15| 16| ...|
| 4| 17| 18| 19| 20| ...|
+-----------------+------+------+------+------+-----------------+
>>> internal.data_spark_column_names
['(X, A)', '(X, B)', '(Y, C)', '(Y, D)']
>>> internal.column_labels
[('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]
For Series, it also holds scol to represent the column.
>>> psseries = psdf1.B
>>> psseries
A
1 5
2 6
3 7
4 8
Name: B, dtype: int64
>>> internal = psseries._internal
>>> internal.spark_frame.show() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
+-----------------+---+---+---+---+---+-----------------+
|__index_level_0__| A| B| C| D| E|__natural_order__|
+-----------------+---+---+---+---+---+-----------------+
| 0| 1| 5| 9| 13| 17| ...|
| 1| 2| 6| 10| 14| 18| ...|
| 2| 3| 7| 11| 15| 19| ...|
| 3| 4| 8| 12| 16| 20| ...|
+-----------------+---+---+---+---+---+-----------------+
>>> internal.data_spark_column_names
['B']
>>> internal.index_spark_column_names
['A']
>>> internal.spark_column_names
['A', 'B']
>>> internal.index_names
[('A',)]
>>> internal.data_fields
[InternalField(dtype=int64,struct_field=StructField(B,LongType,false))]
>>> internal.index_fields
[InternalField(dtype=int64,struct_field=StructField(A,LongType,false))]
>>> internal.to_internal_spark_frame.show() # doctest: +NORMALIZE_WHITESPACE
+---+---+
| A| B|
+---+---+
| 1| 5|
| 2| 6|
| 3| 7|
| 4| 8|
+---+---+
>>> internal.to_pandas_frame # doctest: +NORMALIZE_WHITESPACE
B
A
1 5
2 6
3 7
4 8
"""
    def __init__(
        self,
        spark_frame: SparkDataFrame,
        index_spark_columns: Optional[List[Column]],
        index_names: Optional[List[Optional[Label]]] = None,
        index_fields: Optional[List[InternalField]] = None,
        column_labels: Optional[List[Label]] = None,
        data_spark_columns: Optional[List[Column]] = None,
        data_fields: Optional[List[InternalField]] = None,
        column_label_names: Optional[List[Optional[Label]]] = None,
    ):
        """
        Create a new internal immutable DataFrame to manage Spark DataFrame, column fields and
        index fields and names.

        :param spark_frame: Spark DataFrame to be managed.
        :param index_spark_columns: list of Spark Column
            Spark Columns for the index.
        :param index_names: list of tuples
            the index names.
        :param index_fields: list of InternalField
            the InternalFields for the index columns
        :param column_labels: list of tuples with the same length
            The multi-level values in the tuples.
        :param data_spark_columns: list of Spark Column
            Spark Columns to appear as columns. If this is None, calculated
            from spark_frame.
        :param data_fields: list of InternalField
            the InternalFields for the data columns
        :param column_label_names: Names for each of the column index levels.

        See the examples below to refer what each parameter means.

        >>> column_labels = pd.MultiIndex.from_tuples(
        ...     [('a', 'x'), ('a', 'y'), ('b', 'z')], names=["column_labels_a", "column_labels_b"])
        >>> row_index = pd.MultiIndex.from_tuples(
        ...     [('foo', 'bar'), ('foo', 'bar'), ('zoo', 'bar')],
        ...     names=["row_index_a", "row_index_b"])
        >>> psdf = ps.DataFrame(
        ...     [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=row_index, columns=column_labels)
        >>> psdf.set_index(('a', 'x'), append=True, inplace=True)
        >>> psdf  # doctest: +NORMALIZE_WHITESPACE
        column_labels_a                  a  b
        column_labels_b                  y  z
        row_index_a row_index_b (a, x)
        foo         bar         1        2  3
                                4        5  6
        zoo         bar         7        8  9

        >>> internal = psdf._internal

        >>> internal.spark_frame.show()  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
        +-----------------+-----------------+------+------+------+...
        |__index_level_0__|__index_level_1__|(a, x)|(a, y)|(b, z)|...
        +-----------------+-----------------+------+------+------+...
        |              foo|              bar|     1|     2|     3|...
        |              foo|              bar|     4|     5|     6|...
        |              zoo|              bar|     7|     8|     9|...
        +-----------------+-----------------+------+------+------+...

        >>> internal.index_spark_columns  # doctest: +SKIP
        [Column<'__index_level_0__'>, Column<'__index_level_1__'>, Column<'(a, x)'>]

        >>> internal.index_names
        [('row_index_a',), ('row_index_b',), ('a', 'x')]

        >>> internal.index_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=object,struct_field=StructField(__index_level_0__,StringType,false)),
         InternalField(dtype=object,struct_field=StructField(__index_level_1__,StringType,false)),
         InternalField(dtype=int64,struct_field=StructField((a, x),LongType,false))]

        >>> internal.column_labels
        [('a', 'y'), ('b', 'z')]

        >>> internal.data_spark_columns  # doctest: +SKIP
        [Column<'(a, y)'>, Column<'(b, z)'>]

        >>> internal.data_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=int64,struct_field=StructField((a, y),LongType,false)),
         InternalField(dtype=int64,struct_field=StructField((b, z),LongType,false))]

        >>> internal.column_label_names
        [('column_labels_a',), ('column_labels_b',)]
        """
        assert isinstance(spark_frame, SparkDataFrame)
        assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."
        # No index columns were given: narrow the frame to the requested data columns
        # (if any) and attach a freshly generated default index column.
        if not index_spark_columns:
            if data_spark_columns is not None:
                if column_labels is not None:
                    data_spark_columns = [
                        scol.alias(name_like_string(label))
                        for scol, label in zip(data_spark_columns, column_labels)
                    ]
                spark_frame = spark_frame.select(data_spark_columns)
            # NOTE(review): the message interpolates the regex pattern object itself,
            # not the offending column names.
            assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
                "Index columns should not appear in columns of the Spark DataFrame. Avoid "
                "index column names [%s]." % SPARK_INDEX_NAME_PATTERN
            )
            # Create default index.
            spark_frame = InternalFrame.attach_default_index(spark_frame)
            index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]
            index_fields = [
                InternalField.from_struct_field(
                    StructField(SPARK_DEFAULT_INDEX_NAME, LongType(), nullable=False)
                )
            ]
            if data_spark_columns is not None:
                # Re-resolve the data columns against the new frame produced by
                # attach_default_index; the old Column objects belong to the old frame.
                data_struct_fields = [
                    field
                    for field in spark_frame.schema.fields
                    if field.name != SPARK_DEFAULT_INDEX_NAME
                ]
                data_spark_columns = [
                    scol_for(spark_frame, field.name) for field in data_struct_fields
                ]
                if data_fields is not None:
                    data_fields = [
                        field.copy(
                            name=name_like_string(struct_field.name),
                        )
                        for field, struct_field in zip(data_fields, data_struct_fields)
                    ]
        # Attach the natural-order column (monotonically increasing id) if absent.
        if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
            spark_frame = spark_frame.withColumn(
                NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
            )
        self._sdf: SparkDataFrame = spark_frame
        # index_spark_columns
        assert all(
            isinstance(index_scol, Column) for index_scol in index_spark_columns
        ), index_spark_columns
        self._index_spark_columns: List[Column] = index_spark_columns
        # data_spark_columns: if not given, derive them as every column that is
        # neither an index column nor a hidden internal column.
        if data_spark_columns is None:
            data_spark_columns = [
                scol_for(spark_frame, col)
                for col in spark_frame.columns
                if all(
                    not spark_column_equals(scol_for(spark_frame, col), index_scol)
                    for index_scol in index_spark_columns
                )
                and col not in HIDDEN_COLUMNS
            ]
        else:
            assert all(isinstance(scol, Column) for scol in data_spark_columns)
        self._data_spark_columns: List[Column] = data_spark_columns
        # fields: pad missing fields with None placeholders, then fill any field that
        # is None or lacks a StructField by inspecting the schema of a select() —
        # a combined select when both sides need it, to analyze the plan only once.
        if index_fields is None:
            index_fields = [None] * len(index_spark_columns)
        if data_fields is None:
            data_fields = [None] * len(data_spark_columns)
        assert len(index_spark_columns) == len(index_fields), (
            len(index_spark_columns),
            len(index_fields),
        )
        assert len(data_spark_columns) == len(data_fields), (
            len(data_spark_columns),
            len(data_fields),
        )
        if any(field is None or field.struct_field is None for field in index_fields) and any(
            field is None or field.struct_field is None for field in data_fields
        ):
            schema = spark_frame.select(index_spark_columns + data_spark_columns).schema
            fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields + data_fields, schema.fields)
            ]
            index_fields = fields[: len(index_spark_columns)]
            data_fields = fields[len(index_spark_columns) :]
        elif any(field is None or field.struct_field is None for field in index_fields):
            schema = spark_frame.select(index_spark_columns).schema
            index_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields, schema.fields)
            ]
        elif any(field is None or field.struct_field is None for field in data_fields):
            schema = spark_frame.select(data_spark_columns).schema
            data_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(data_fields, schema.fields)
            ]
        # Every index field must have a dtype that maps back to a Spark type
        # (or be plain object).
        assert all(
            isinstance(ops.dtype, Dtype.__args__)  # type: ignore[attr-defined]
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in index_fields
        ), index_fields
        if is_testing():
            struct_fields = spark_frame.select(index_spark_columns).schema.fields
            assert all(
                index_field.struct_field == struct_field
                for index_field, struct_field in zip(index_fields, struct_fields)
            ), (index_fields, struct_fields)
        self._index_fields: List[InternalField] = index_fields
        # Same dtype invariant for the data fields.
        assert all(
            isinstance(ops.dtype, Dtype.__args__)  # type: ignore[attr-defined]
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in data_fields
        ), data_fields
        if is_testing():
            struct_fields = spark_frame.select(data_spark_columns).schema.fields
            assert all(
                data_field.struct_field == struct_field
                for data_field, struct_field in zip(data_fields, struct_fields)
            ), (data_fields, struct_fields)
        self._data_fields: List[InternalField] = data_fields
        # index_names: default to unnamed (None) per index level.
        if not index_names:
            index_names = [None] * len(index_spark_columns)
        assert len(index_spark_columns) == len(index_names), (
            len(index_spark_columns),
            len(index_names),
        )
        assert all(
            is_name_like_tuple(index_name, check_type=True) for index_name in index_names
        ), index_names
        self._index_names: List[Optional[Label]] = index_names
        # column_labels: default to one single-level label per data column, using the
        # resolved Spark column names.
        if column_labels is None:
            column_labels = [(col,) for col in spark_frame.select(self._data_spark_columns).columns]
        else:
            assert len(column_labels) == len(self._data_spark_columns), (
                len(column_labels),
                len(self._data_spark_columns),
            )
            if len(column_labels) == 1:
                column_label = column_labels[0]
                assert is_name_like_tuple(column_label, check_type=True), column_label
            else:
                assert all(
                    is_name_like_tuple(column_label, check_type=True)
                    for column_label in column_labels
                ), column_labels
                # All labels must have the same number of levels.
                assert len(set(len(label) for label in column_labels)) <= 1, column_labels
        self._column_labels: List[Label] = column_labels
        # column_label_names: one (possibly None) name per column-label level.
        if column_label_names is None:
            column_label_names = [None] * column_labels_level(self._column_labels)
        else:
            if len(self._column_labels) > 0:
                assert len(column_label_names) == column_labels_level(self._column_labels), (
                    len(column_label_names),
                    column_labels_level(self._column_labels),
                )
            else:
                assert len(column_label_names) > 0, len(column_label_names)
            assert all(
                is_name_like_tuple(column_label_name, check_type=True)
                for column_label_name in column_label_names
            ), column_label_names
        self._column_label_names: List[Optional[Label]] = column_label_names
@staticmethod
def attach_default_index(
sdf: SparkDataFrame, default_index_type: Optional[str] = None
) -> SparkDataFrame:
"""
This method attaches a default index to Spark DataFrame. Spark does not have the index
notion so corresponding column should be generated.
There are several types of default index can be configured by `compute.default_index_type`.
>>> spark_frame = ps.range(10).to_spark()
>>> spark_frame
DataFrame[id: bigint]
It adds the default index column '__index_level_0__'.
>>> spark_frame = InternalFrame.attach_default_index(spark_frame)
>>> spark_frame
DataFrame[__index_level_0__: bigint, id: bigint]
It throws an exception if the given column name already exists.
>>> InternalFrame.attach_default_index(spark_frame)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: '__index_level_0__' already exists...
"""
index_column = SPARK_DEFAULT_INDEX_NAME
assert (
index_column not in sdf.columns
), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
if default_index_type is None:
default_index_type = ps.get_option("compute.default_index_type")
if default_index_type == "sequence":
return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed-sequence":
return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
elif default_index_type == "distributed":
return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
else:
raise ValueError(
"'compute.default_index_type' should be one of 'sequence',"
" 'distributed-sequence' and 'distributed'"
)
@staticmethod
def attach_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
sequential_index = (
F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
)
return sdf.select(sequential_index.alias(column_name), *scols)
@staticmethod
def attach_distributed_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
scols = [scol_for(sdf, column) for column in sdf.columns]
return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
@staticmethod
def attach_distributed_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
"""
This method attaches a Spark column that has a sequence in a distributed manner.
This is equivalent to the column assigned when default index type 'distributed-sequence'.
>>> sdf = ps.DataFrame(['a', 'b', 'c']).to_spark()
>>> sdf = InternalFrame.attach_distributed_sequence_column(sdf, column_name="sequence")
>>> sdf.show() # doctest: +NORMALIZE_WHITESPACE
+--------+---+
|sequence| 0|
+--------+---+
| 0| a|
| 1| b|
| 2| c|
+--------+---+
"""
if len(sdf.columns) > 0:
return SparkDataFrame(
sdf._jdf.toDF().withSequenceColumn(column_name), # type: ignore[operator]
sdf.sql_ctx,
)
else:
cnt = sdf.count()
if cnt > 0:
return default_session().range(cnt).toDF(column_name)
else:
return default_session().createDataFrame(
[], schema=StructType().add(column_name, data_type=LongType(), nullable=False)
)
def spark_column_for(self, label: Label) -> Column:
"""Return Spark Column for the given column label."""
column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
if label in column_labels_to_scol:
return column_labels_to_scol[label]
else:
raise KeyError(name_like_string(label))
def spark_column_name_for(self, label_or_scol: Union[Label, Column]) -> str:
"""Return the actual Spark column name for the given column label."""
if isinstance(label_or_scol, Column):
return self.spark_frame.select(label_or_scol).columns[0]
else:
return self.field_for(label_or_scol).name
def spark_type_for(self, label_or_scol: Union[Label, Column]) -> DataType:
"""Return DataType for the given column label."""
if isinstance(label_or_scol, Column):
return self.spark_frame.select(label_or_scol).schema[0].dataType
else:
return self.field_for(label_or_scol).spark_type
def spark_column_nullable_for(self, label_or_scol: Union[Label, Column]) -> bool:
"""Return nullability for the given column label."""
if isinstance(label_or_scol, Column):
return self.spark_frame.select(label_or_scol).schema[0].nullable
else:
return self.field_for(label_or_scol).nullable
def field_for(self, label: Label) -> InternalField:
"""Return InternalField for the given column label."""
column_labels_to_fields = dict(zip(self.column_labels, self.data_fields))
if label in column_labels_to_fields:
return column_labels_to_fields[label]
else:
raise KeyError(name_like_string(label))
@property
def spark_frame(self) -> SparkDataFrame:
"""Return the managed Spark DataFrame."""
return self._sdf
@lazy_property
def data_spark_column_names(self) -> List[str]:
"""Return the managed column field names."""
return [field.name for field in self.data_fields]
@property
def data_spark_columns(self) -> List[Column]:
"""Return Spark Columns for the managed data columns."""
return self._data_spark_columns
@property
def index_spark_column_names(self) -> List[str]:
"""Return the managed index field names."""
return [field.name for field in self.index_fields]
@property
def index_spark_columns(self) -> List[Column]:
"""Return Spark Columns for the managed index columns."""
return self._index_spark_columns
@lazy_property
def spark_column_names(self) -> List[str]:
"""Return all the field names including index field names."""
return self.spark_frame.select(self.spark_columns).columns
@lazy_property
def spark_columns(self) -> List[Column]:
"""Return Spark Columns for the managed columns including index columns."""
index_spark_columns = self.index_spark_columns
return index_spark_columns + [
spark_column
for spark_column in self.data_spark_columns
if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns)
]
@property
def index_names(self) -> List[Optional[Label]]:
"""Return the managed index names."""
return self._index_names
@lazy_property
def index_level(self) -> int:
"""Return the level of the index."""
return len(self._index_names)
@property
def column_labels(self) -> List[Label]:
"""Return the managed column index."""
return self._column_labels
@lazy_property
def column_labels_level(self) -> int:
"""Return the level of the column index."""
return len(self._column_label_names)
@property
def column_label_names(self) -> List[Optional[Label]]:
"""Return names of the index levels."""
return self._column_label_names
@property
def index_fields(self) -> List[InternalField]:
"""Return InternalFields for the managed index columns."""
return self._index_fields
@property
def data_fields(self) -> List[InternalField]:
"""Return InternalFields for the managed columns."""
return self._data_fields
@lazy_property
def to_internal_spark_frame(self) -> SparkDataFrame:
"""
Return as Spark DataFrame. This contains index columns as well
and should be only used for internal purposes.
"""
index_spark_columns = self.index_spark_columns
data_columns = []
for spark_column in self.data_spark_columns:
if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns):
data_columns.append(spark_column)
return self.spark_frame.select(index_spark_columns + data_columns)
@lazy_property
def to_pandas_frame(self) -> pd.DataFrame:
"""Return as pandas DataFrame."""
sdf = self.to_internal_spark_frame
pdf = sdf.toPandas()
if len(pdf) == 0 and len(sdf.schema) > 0:
pdf = pdf.astype(
{field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
)
return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
@lazy_property
def arguments_for_restore_index(self) -> Dict:
"""Create arguments for `restore_index`."""
column_names = []
fields = self.index_fields.copy()
for spark_column, column_name, field in zip(
self.data_spark_columns, self.data_spark_column_names, self.data_fields
):
for index_spark_column_name, index_spark_column in zip(
self.index_spark_column_names, self.index_spark_columns
):
if spark_column_equals(spark_column, index_spark_column):
column_names.append(index_spark_column_name)
break
else:
column_names.append(column_name)
fields.append(field)
return dict(
index_columns=self.index_spark_column_names,
index_names=self.index_names,
data_columns=column_names,
column_labels=self.column_labels,
column_label_names=self.column_label_names,
fields=fields,
)
    @staticmethod
    def restore_index(
        pdf: pd.DataFrame,
        *,
        index_columns: List[str],
        index_names: List[Label],
        data_columns: List[str],
        column_labels: List[Label],
        column_label_names: List[Label],
        fields: Optional[List[InternalField]] = None,
    ) -> pd.DataFrame:
        """
        Restore pandas DataFrame indices using the metadata.

        :param pdf: the pandas DataFrame to be processed.
        :param index_columns: the original column names for index columns.
        :param index_names: the index names after restored.
        :param data_columns: the original column names for data columns.
        :param column_labels: the column labels after restored.
        :param column_label_names: the column label names after restored.
        :param fields: the fields after restored.
        :return: the restored pandas DataFrame

        >>> from numpy import dtype
        >>> pdf = pd.DataFrame({"index": [10, 20, 30], "a": ['a', 'b', 'c'], "b": [0, 2, 1]})
        >>> InternalFrame.restore_index(
        ...     pdf,
        ...     index_columns=["index"],
        ...     index_names=[("idx",)],
        ...     data_columns=["a", "b", "index"],
        ...     column_labels=[("x",), ("y",), ("z",)],
        ...     column_label_names=[("lv1",)],
        ...     fields=[
        ...         InternalField(
        ...             dtype=dtype('int64'),
        ...             struct_field=StructField(name='index', dataType=LongType(), nullable=False),
        ...         ),
        ...         InternalField(
        ...             dtype=dtype('object'),
        ...             struct_field=StructField(name='a', dataType=StringType(), nullable=False),
        ...         ),
        ...         InternalField(
        ...             dtype=CategoricalDtype(categories=["i", "j", "k"]),
        ...             struct_field=StructField(name='b', dataType=LongType(), nullable=False),
        ...         ),
        ...     ],
        ... )  # doctest: +NORMALIZE_WHITESPACE
        lv1  x  y   z
        idx
        10   a  i  10
        20   b  k  20
        30   c  j  30
        """
        # Restore original pandas dtypes (e.g. categoricals) column by column.
        for col, field in zip(pdf.columns, fields):
            pdf[col] = DataTypeOps(field.dtype, field.spark_type).restore(pdf[col])
        append = False
        for index_field in index_columns:
            # Keep the column among the data columns too when it is also requested there.
            drop = index_field not in data_columns
            pdf = pdf.set_index(index_field, drop=drop, append=append)
            append = True
        pdf = pdf[data_columns]
        # Unwrap 1-tuple names back to plain labels; keep tuples for multi-level names.
        pdf.index.names = [
            name if name is None or len(name) > 1 else name[0] for name in index_names
        ]
        names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
        if len(column_label_names) > 1:
            pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
        else:
            pdf.columns = pd.Index(
                [None if label is None else label[0] for label in column_labels],
                name=names[0],
            )
        return pdf
@lazy_property
def resolved_copy(self) -> "InternalFrame":
"""Copy the immutable InternalFrame with the updates resolved."""
sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
return self.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
)
    def with_new_sdf(
        self,
        spark_frame: SparkDataFrame,
        *,
        index_fields: Optional[List[InternalField]] = None,
        data_columns: Optional[List[str]] = None,
        data_fields: Optional[List[InternalField]] = None,
    ) -> "InternalFrame":
        """Copy the immutable InternalFrame with the updates by the specified Spark DataFrame.

        :param spark_frame: the new Spark DataFrame
        :param index_fields: the new InternalFields for the index columns.
            If None, the original dtypes are used.
        :param data_columns: the new column names. If None, the original one is used.
        :param data_fields: the new InternalFields for the data columns.
            If None, the original dtypes are used.
        :return: the copied InternalFrame.
        """
        if index_fields is None:
            index_fields = self.index_fields
        else:
            assert len(index_fields) == len(self.index_fields), (
                len(index_fields),
                len(self.index_fields),
            )
        if data_columns is None:
            data_columns = self.data_spark_column_names
        else:
            assert len(data_columns) == len(self.column_labels), (
                len(data_columns),
                len(self.column_labels),
            )
        if data_fields is None:
            data_fields = self.data_fields
        else:
            assert len(data_fields) == len(self.column_labels), (
                len(data_fields),
                len(self.column_labels),
            )
        # Drop the stale natural-order column; a fresh one is attached when the
        # new InternalFrame is constructed.
        sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
        return self.copy(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
            index_fields=index_fields,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
            data_fields=data_fields,
        )
    def with_new_columns(
        self,
        scols_or_pssers: Sequence[Union[Column, "Series"]],
        *,
        column_labels: Optional[List[Label]] = None,
        data_fields: Optional[List[InternalField]] = None,
        column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
        keep_order: bool = True,
    ) -> "InternalFrame":
        """
        Copy the immutable InternalFrame with the updates by the specified Spark Columns or Series.

        :param scols_or_pssers: the new Spark Columns or Series.
        :param column_labels: the new column index.
            If None, the column_labels of the corresponding `scols_or_pssers` is used if it is
            Series; otherwise the original one is used.
        :param data_fields: the new InternalFields for the data columns.
            If None, the dtypes of the corresponding `scols_or_pssers` is used if it is Series;
            otherwise the dtypes will be inferred from the corresponding `scols_or_pssers`.
        :param column_label_names: the new names of the column index levels.
        :param keep_order: if False, re-select the frame with only the index and new data
            columns instead of keeping the current Spark DataFrame as-is.
        :return: the copied InternalFrame.
        """
        from pyspark.pandas.series import Series
        if column_labels is None:
            # Derive labels from Series where possible; otherwise keep the originals.
            if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
                column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
            else:
                assert len(scols_or_pssers) == len(self.column_labels), (
                    len(scols_or_pssers),
                    len(self.column_labels),
                )
                column_labels = []
                for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
                    if isinstance(scol_or_psser, Series):
                        column_labels.append(scol_or_psser._column_label)
                    else:
                        column_labels.append(label)
        else:
            assert len(scols_or_pssers) == len(column_labels), (
                len(scols_or_pssers),
                len(column_labels),
            )
        data_spark_columns = []
        for scol_or_psser in scols_or_pssers:
            if isinstance(scol_or_psser, Series):
                scol = scol_or_psser.spark.column
            else:
                scol = scol_or_psser
            data_spark_columns.append(scol)
        if data_fields is None:
            # Fields from Series are reused; None entries are inferred later by __init__.
            data_fields = []
            for scol_or_psser in scols_or_pssers:
                if isinstance(scol_or_psser, Series):
                    data_fields.append(scol_or_psser._internal.data_fields[0])
                else:
                    data_fields.append(None)
        else:
            assert len(scols_or_pssers) == len(data_fields), (
                len(scols_or_pssers),
                len(data_fields),
            )
        sdf = self.spark_frame
        if not keep_order:
            sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
            index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
            data_spark_columns = [
                scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
            ]
        else:
            index_spark_columns = self.index_spark_columns
        if column_label_names is _NoValue:
            column_label_names = self._column_label_names
        return self.copy(
            spark_frame=sdf,
            index_spark_columns=index_spark_columns,
            column_labels=column_labels,
            data_spark_columns=data_spark_columns,
            data_fields=data_fields,
            column_label_names=column_label_names,
        )
def with_filter(self, pred: Union[Column, "Series"]) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the predicate.
:param pred: the predicate to filter.
:return: the copied InternalFrame.
"""
from pyspark.pandas.series import Series
if isinstance(pred, Series):
assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
condition = pred.spark.column
else:
condition = pred
spark_type = self.spark_frame.select(condition).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))
def with_new_spark_column(
self,
column_label: Label,
scol: Column,
*,
field: Optional[InternalField] = None,
keep_order: bool = True,
) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the updates by the specified Spark Column.
:param column_label: the column label to be updated.
:param scol: the new Spark Column
:param field: the new InternalField for the data column.
If not specified, the InternalField will be inferred from the spark Column.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
idx = self.column_labels.index(column_label)
data_spark_columns = self.data_spark_columns.copy()
data_spark_columns[idx] = scol
data_fields = self.data_fields.copy()
data_fields[idx] = field
return self.with_new_columns(
data_spark_columns, data_fields=data_fields, keep_order=keep_order
)
def select_column(self, column_label: Label) -> "InternalFrame":
"""
Copy the immutable InternalFrame with the specified column.
:param column_label: the column label to use.
:return: the copied InternalFrame.
"""
assert column_label in self.column_labels, column_label
return self.copy(
column_labels=[column_label],
data_spark_columns=[self.spark_column_for(column_label)],
data_fields=[self.field_for(column_label)],
column_label_names=None,
)
    def copy(
        self,
        *,
        spark_frame: Union[SparkDataFrame, _NoValueType] = _NoValue,
        index_spark_columns: Union[List[Column], _NoValueType] = _NoValue,
        index_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
        index_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
        column_labels: Union[Optional[List[Label]], _NoValueType] = _NoValue,
        data_spark_columns: Union[Optional[List[Column]], _NoValueType] = _NoValue,
        data_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
        column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
    ) -> "InternalFrame":
        """
        Copy the immutable InternalFrame.

        :param spark_frame: the new Spark DataFrame. If not specified, the original one is used.
        :param index_spark_columns: the list of Spark Column.
            If not specified, the original ones are used.
        :param index_names: the index names. If not specified, the original ones are used.
        :param index_fields: the new InternalFields for the index columns.
            If not specified, the original metadata are used.
        :param column_labels: the new column labels. If not specified, the original ones are used.
        :param data_spark_columns: the new Spark Columns.
            If not specified, the original ones are used.
        :param data_fields: the new InternalFields for the data columns.
            If not specified, the original metadata are used.
        :param column_label_names: the new names of the column index levels.
            If not specified, the original ones are used.
        :return: the copied immutable InternalFrame.
        """
        # _NoValue (not None) marks "unspecified", since None is a meaningful value
        # for several of these parameters; fall back to the current attribute.
        if spark_frame is _NoValue:
            spark_frame = self.spark_frame
        if index_spark_columns is _NoValue:
            index_spark_columns = self.index_spark_columns
        if index_names is _NoValue:
            index_names = self.index_names
        if index_fields is _NoValue:
            index_fields = self.index_fields
        if column_labels is _NoValue:
            column_labels = self.column_labels
        if data_spark_columns is _NoValue:
            data_spark_columns = self.data_spark_columns
        if data_fields is _NoValue:
            data_fields = self.data_fields
        if column_label_names is _NoValue:
            column_label_names = self.column_label_names
        return InternalFrame(
            spark_frame=cast(SparkDataFrame, spark_frame),
            index_spark_columns=cast(List[Column], index_spark_columns),
            index_names=cast(Optional[List[Optional[Label]]], index_names),
            index_fields=cast(Optional[List[InternalField]], index_fields),
            column_labels=cast(Optional[List[Label]], column_labels),
            data_spark_columns=cast(Optional[List[Column]], data_spark_columns),
            data_fields=cast(Optional[List[InternalField]], data_fields),
            column_label_names=cast(Optional[List[Optional[Label]]], column_label_names),
        )
    @staticmethod
    def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
        """Create an immutable DataFrame from pandas DataFrame.

        :param pdf: :class:`pd.DataFrame`
        :return: the created immutable DataFrame
        """
        # Wrap plain (non-tuple) index names into 1-tuples; None stays None.
        index_names: List[Optional[Label]] = [
            name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
        ]
        columns = pdf.columns
        column_labels: List[Label]
        if isinstance(columns, pd.MultiIndex):
            column_labels = columns.tolist()
        else:
            column_labels = [(col,) for col in columns]
        column_label_names: List[Optional[Label]] = [
            name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
        ]
        prefer_timestamp_ntz = is_timestamp_ntz_preferred()
        # Convert the frame into a Spark-compatible shape and collect field metadata.
        (
            pdf,
            index_columns,
            index_fields,
            data_columns,
            data_fields,
        ) = InternalFrame.prepare_pandas_frame(pdf, prefer_timestamp_ntz=prefer_timestamp_ntz)
        schema = StructType([field.struct_field for field in index_fields + data_fields])
        sdf = default_session().createDataFrame(pdf, schema=schema)
        return InternalFrame(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, col) for col in index_columns],
            index_names=index_names,
            index_fields=index_fields,
            column_labels=column_labels,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
            data_fields=data_fields,
            column_label_names=column_label_names,
        )
    @staticmethod
    def prepare_pandas_frame(
        pdf: pd.DataFrame, *, retain_index: bool = True, prefer_timestamp_ntz: bool = False
    ) -> Tuple[pd.DataFrame, List[str], List[InternalField], List[str], List[InternalField]]:
        """
        Prepare pandas DataFrame for creating Spark DataFrame.

        :param pdf: the pandas DataFrame to be prepared.
        :param retain_index: whether the indices should be retained.
        :param prefer_timestamp_ntz: forwarded to `infer_pd_series_spark_type` when
            inferring Spark types for datetime-like columns.
        :return: the tuple of
            - the prepared pandas dataFrame
            - index column names for Spark DataFrame
            - the InternalFields for the index columns of the given pandas DataFrame
            - data column names for Spark DataFrame
            - the InternalFields for the data columns of the given pandas DataFrame

        >>> pdf = pd.DataFrame(
        ...    {("x", "a"): ['a', 'b', 'c'],
        ...     ("y", "b"): pd.Categorical(["i", "k", "j"], categories=["i", "j", "k"])},
        ...    index=[10, 20, 30])
        >>> prepared, index_columns, index_fields, data_columns, data_fields = (
        ...     InternalFrame.prepare_pandas_frame(pdf)
        ... )
        >>> prepared
           __index_level_0__ (x, a)  (y, b)
        0                 10      a       0
        1                 20      b       2
        2                 30      c       1
        >>> index_columns
        ['__index_level_0__']
        >>> index_fields
        [InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,false))]
        >>> data_columns
        ['(x, a)', '(y, b)']
        >>> data_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=object,struct_field=StructField((x, a),StringType,false)),
         InternalField(dtype=category,struct_field=StructField((y, b),ByteType,false))]

        >>> import datetime
        >>> pdf = pd.DataFrame({
        ...     "dt": [datetime.datetime(1970, 1, 1)], "dt_obj": [datetime.datetime(1970, 1, 1)]
        ... })
        >>> pdf.dt_obj = pdf.dt_obj.astype("object")
        >>> _, _, _, _, data_fields = (
        ...     InternalFrame.prepare_pandas_frame(pdf, prefer_timestamp_ntz=True)
        ... )
        >>> data_fields
        [InternalField(dtype=datetime64[ns],struct_field=StructField(dt,TimestampNTZType,false)),
         InternalField(dtype=object,struct_field=StructField(dt_obj,TimestampNTZType,false))]

        >>> pdf = pd.DataFrame({
        ...     "td": [datetime.timedelta(0)], "td_obj": [datetime.timedelta(0)]
        ... })
        >>> pdf.td_obj = pdf.td_obj.astype("object")
        >>> _, _, _, _, data_fields = (
        ...     InternalFrame.prepare_pandas_frame(pdf)
        ... )
        >>> data_fields  # doctest: +NORMALIZE_WHITESPACE
        [InternalField(dtype=timedelta64[ns],struct_field=StructField(td,DayTimeIntervalType(0,3),false)),
         InternalField(dtype=object,struct_field=StructField(td_obj,DayTimeIntervalType(0,3),false))]
        """
        pdf = pdf.copy()  # never mutate the caller's frame
        # Flatten (possibly tuple) column labels into plain strings for Spark.
        data_columns = [name_like_string(col) for col in pdf.columns]
        pdf.columns = data_columns
        if retain_index:
            # Move the index levels into regular columns named "__index_level_N__".
            index_nlevels = pdf.index.nlevels
            index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
            pdf.index.names = index_columns
            reset_index = pdf.reset_index()
        else:
            index_nlevels = 0
            index_columns = []
            reset_index = pdf
        index_dtypes = list(reset_index.dtypes)[:index_nlevels]
        data_dtypes = list(reset_index.dtypes)[index_nlevels:]
        # Convert each column into its Spark-compatible representation in place.
        for col, dtype in zip(reset_index.columns, reset_index.dtypes):
            spark_type = infer_pd_series_spark_type(reset_index[col], dtype, prefer_timestamp_ntz)
            reset_index[col] = DataTypeOps(dtype, spark_type).prepare(reset_index[col])
        fields = [
            InternalField(
                dtype=dtype,
                struct_field=StructField(
                    name=str(name),
                    # NOTE(review): type inference runs a second time here, on the
                    # *prepared* column — presumably intentional so the field reflects
                    # the post-`prepare` representation; confirm before consolidating
                    # with the loop above.
                    dataType=infer_pd_series_spark_type(col, dtype, prefer_timestamp_ntz),
                    nullable=bool(col.isnull().any()),
                ),
            )
            for (name, col), dtype in zip(reset_index.iteritems(), index_dtypes + data_dtypes)
        ]
        return (
            reset_index,
            index_columns,
            fields[:index_nlevels],
            data_columns,
            fields[index_nlevels:],
        )
def _test() -> None:
    """Run this module's doctests against a local SparkSession."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.internal
    # Doctests rely on paths relative to the Spark installation root.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.internal.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.internal tests")
        .getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.internal,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    # Non-zero exit signals doctest failures to the test runner.
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Run the doctest suite when executed directly.
    _test()
| 40.172671 | 106 | 0.581434 |
import re
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark._globals import _NoValue, _NoValueType
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, Window
from pyspark.sql.types import (
BooleanType,
DataType,
LongType,
StructField,
StructType,
StringType,
)
from pyspark.sql.utils import is_timestamp_ntz_preferred
from pyspark import pandas as ps
from pyspark.pandas._typing import Label
if TYPE_CHECKING:
from pyspark.pandas.series import Series
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.typedef import (
Dtype,
as_spark_type,
extension_dtypes,
infer_pd_series_spark_type,
spark_type_to_pandas_dtype,
)
from pyspark.pandas.utils import (
column_labels_level,
default_session,
is_name_like_tuple,
is_testing,
lazy_property,
name_like_string,
scol_for,
spark_column_equals,
)
# Naming scheme for hidden index columns, e.g. "__index_level_0__".
SPARK_INDEX_NAME_FORMAT = "__index_level_{}__".format
SPARK_DEFAULT_INDEX_NAME = SPARK_INDEX_NAME_FORMAT(0)
# Matches any column name produced by SPARK_INDEX_NAME_FORMAT.
SPARK_INDEX_NAME_PATTERN = re.compile(r"__index_level_[0-9]+__")
# Hidden column preserving row order via monotonically increasing ids.
NATURAL_ORDER_COLUMN_NAME = "__natural_order__"
HIDDEN_COLUMNS = {NATURAL_ORDER_COLUMN_NAME}
# Default name used for an unnamed Series ("0" on the Spark side).
DEFAULT_SERIES_NAME = 0
SPARK_DEFAULT_SERIES_NAME = str(DEFAULT_SERIES_NAME)
class InternalField:
    """
    Pairs a pandas dtype with its (optional) Spark ``StructField``.

    The ``StructField`` carries the Spark-side name, data type, nullability and
    metadata for a single managed column, while ``dtype`` keeps the pandas-side
    type (which may be richer, e.g. ``CategoricalDtype``).

    NOTE: defining ``__eq__`` without ``__hash__`` makes instances unhashable.
    """
    def __init__(self, dtype: Dtype, struct_field: Optional[StructField] = None):
        self._dtype = dtype
        self._struct_field = struct_field
    @staticmethod
    def from_struct_field(
        struct_field: StructField, *, use_extension_dtypes: bool = False
    ) -> "InternalField":
        """Create an InternalField, deriving the pandas dtype from the Spark type."""
        return InternalField(
            dtype=spark_type_to_pandas_dtype(
                struct_field.dataType, use_extension_dtypes=use_extension_dtypes
            ),
            struct_field=struct_field,
        )
    @property
    def dtype(self) -> Dtype:
        """Return the pandas dtype."""
        return self._dtype
    @property
    def struct_field(self) -> Optional[StructField]:
        """Return the Spark StructField, or None if not yet resolved."""
        return self._struct_field
    @property
    def name(self) -> str:
        """Return the field name; requires a resolved StructField."""
        assert self.struct_field is not None
        return self.struct_field.name
    @property
    def spark_type(self) -> DataType:
        """Return the Spark data type; requires a resolved StructField."""
        assert self.struct_field is not None
        return self.struct_field.dataType
    @property
    def nullable(self) -> bool:
        """Return whether the field is nullable; requires a resolved StructField."""
        assert self.struct_field is not None
        return self.struct_field.nullable
    @property
    def metadata(self) -> Dict[str, Any]:
        """Return the StructField metadata dict; requires a resolved StructField."""
        assert self.struct_field is not None
        return self.struct_field.metadata
    @property
    def is_extension_dtype(self) -> bool:
        """Return whether the dtype is a pandas extension dtype."""
        return isinstance(self.dtype, extension_dtypes)
    def normalize_spark_type(self) -> "InternalField":
        """Return a copy with a nullable Spark type and normalized decimal precision/scale."""
        assert self.struct_field is not None
        return self.copy(
            spark_type=force_decimal_precision_scale(as_nullable_spark_type(self.spark_type)),
            nullable=True,
        )
    def copy(
        self,
        *,
        name: Union[str, _NoValueType] = _NoValue,
        dtype: Union[Dtype, _NoValueType] = _NoValue,
        spark_type: Union[DataType, _NoValueType] = _NoValue,
        nullable: Union[bool, _NoValueType] = _NoValue,
        metadata: Union[Optional[Dict[str, Any]], _NoValueType] = _NoValue,
    ) -> "InternalField":
        """Return a copy, overriding any attribute not left as the _NoValue sentinel."""
        if name is _NoValue:
            name = self.name
        if dtype is _NoValue:
            dtype = self.dtype
        if spark_type is _NoValue:
            spark_type = self.spark_type
        if nullable is _NoValue:
            nullable = self.nullable
        if metadata is _NoValue:
            metadata = self.metadata
        return InternalField(
            dtype=cast(Dtype, dtype),
            struct_field=StructField(
                name=cast(str, name),
                dataType=cast(DataType, spark_type),
                nullable=cast(bool, nullable),
                metadata=cast(Optional[Dict[str, Any]], metadata),
            ),
        )
    def __eq__(self, other: Any) -> bool:
        """Equal when both dtype and StructField match."""
        return (
            isinstance(other, InternalField)
            and self.dtype == other.dtype
            and self.struct_field == other.struct_field
        )
    def __repr__(self) -> str:
        return "InternalField(dtype={dtype},struct_field={struct_field})".format(
            dtype=self.dtype, struct_field=self.struct_field
        )
class InternalFrame:
    def __init__(
        self,
        spark_frame: SparkDataFrame,
        index_spark_columns: Optional[List[Column]],
        index_names: Optional[List[Optional[Label]]] = None,
        index_fields: Optional[List[InternalField]] = None,
        column_labels: Optional[List[Label]] = None,
        data_spark_columns: Optional[List[Column]] = None,
        data_fields: Optional[List[InternalField]] = None,
        column_label_names: Optional[List[Optional[Label]]] = None,
    ):
        """
        Create the internal immutable metadata mapping a Spark DataFrame to
        pandas-style index/columns.

        :param spark_frame: the managed Spark DataFrame (must not be streaming).
        :param index_spark_columns: Spark Columns serving as the index; if falsy,
            a default index column is attached.
        :param index_names: index level names as tuples (or None per level).
        :param index_fields: InternalFields for the index columns; None entries
            are inferred from the Spark schema.
        :param column_labels: column labels as tuples; inferred from the data
            columns when None.
        :param data_spark_columns: Spark Columns for the data; inferred from the
            frame's non-index, non-hidden columns when None.
        :param data_fields: InternalFields for the data columns; None entries
            are inferred from the Spark schema.
        :param column_label_names: names of the column index levels.
        """
        assert isinstance(spark_frame, SparkDataFrame)
        assert not spark_frame.isStreaming, "pandas-on-Spark does not support Structured Streaming."
        if not index_spark_columns:
            # No index given: attach a default index column to the frame.
            if data_spark_columns is not None:
                if column_labels is not None:
                    data_spark_columns = [
                        scol.alias(name_like_string(label))
                        for scol, label in zip(data_spark_columns, column_labels)
                    ]
                spark_frame = spark_frame.select(data_spark_columns)
            assert not any(SPARK_INDEX_NAME_PATTERN.match(name) for name in spark_frame.columns), (
                "Index columns should not appear in columns of the Spark DataFrame. Avoid "
                "index column names [%s]." % SPARK_INDEX_NAME_PATTERN
            )
            spark_frame = InternalFrame.attach_default_index(spark_frame)
            index_spark_columns = [scol_for(spark_frame, SPARK_DEFAULT_INDEX_NAME)]
            index_fields = [
                InternalField.from_struct_field(
                    StructField(SPARK_DEFAULT_INDEX_NAME, LongType(), nullable=False)
                )
            ]
            if data_spark_columns is not None:
                # Re-resolve data columns against the new frame (all but the new index).
                data_struct_fields = [
                    field
                    for field in spark_frame.schema.fields
                    if field.name != SPARK_DEFAULT_INDEX_NAME
                ]
                data_spark_columns = [
                    scol_for(spark_frame, field.name) for field in data_struct_fields
                ]
                if data_fields is not None:
                    data_fields = [
                        field.copy(
                            name=name_like_string(struct_field.name),
                        )
                        for field, struct_field in zip(data_fields, data_struct_fields)
                    ]
        # Ensure the hidden natural-order column exists to preserve row order.
        if NATURAL_ORDER_COLUMN_NAME not in spark_frame.columns:
            spark_frame = spark_frame.withColumn(
                NATURAL_ORDER_COLUMN_NAME, F.monotonically_increasing_id()
            )
        self._sdf: SparkDataFrame = spark_frame
        assert all(
            isinstance(index_scol, Column) for index_scol in index_spark_columns
        ), index_spark_columns
        self._index_spark_columns: List[Column] = index_spark_columns
        if data_spark_columns is None:
            # Default: every column that is neither an index column nor hidden.
            data_spark_columns = [
                scol_for(spark_frame, col)
                for col in spark_frame.columns
                if all(
                    not spark_column_equals(scol_for(spark_frame, col), index_scol)
                    for index_scol in index_spark_columns
                )
                and col not in HIDDEN_COLUMNS
            ]
        else:
            assert all(isinstance(scol, Column) for scol in data_spark_columns)
        self._data_spark_columns: List[Column] = data_spark_columns
        if index_fields is None:
            index_fields = [None] * len(index_spark_columns)
        if data_fields is None:
            data_fields = [None] * len(data_spark_columns)
        assert len(index_spark_columns) == len(index_fields), (
            len(index_spark_columns),
            len(index_fields),
        )
        assert len(data_spark_columns) == len(data_fields), (
            len(data_spark_columns),
            len(data_fields),
        )
        # Fill in missing fields/StructFields from the analyzed Spark schema.
        # The three branches differ only in which column set must be analyzed,
        # avoiding schema analysis for columns whose fields are already complete.
        if any(field is None or field.struct_field is None for field in index_fields) and any(
            field is None or field.struct_field is None for field in data_fields
        ):
            schema = spark_frame.select(index_spark_columns + data_spark_columns).schema
            fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields + data_fields, schema.fields)
            ]
            index_fields = fields[: len(index_spark_columns)]
            data_fields = fields[len(index_spark_columns) :]
        elif any(field is None or field.struct_field is None for field in index_fields):
            schema = spark_frame.select(index_spark_columns).schema
            index_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(index_fields, schema.fields)
            ]
        elif any(field is None or field.struct_field is None for field in data_fields):
            schema = spark_frame.select(data_spark_columns).schema
            data_fields = [
                InternalField.from_struct_field(struct_field)
                if field is None
                else InternalField(field.dtype, struct_field)
                if field.struct_field is None
                else field
                for field, struct_field in zip(data_fields, schema.fields)
            ]
        # Sanity checks: every dtype must be a known Dtype convertible to Spark.
        assert all(
            isinstance(ops.dtype, Dtype.__args__)
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in index_fields
        ), index_fields
        if is_testing():
            struct_fields = spark_frame.select(index_spark_columns).schema.fields
            assert all(
                index_field.struct_field == struct_field
                for index_field, struct_field in zip(index_fields, struct_fields)
            ), (index_fields, struct_fields)
        self._index_fields: List[InternalField] = index_fields
        assert all(
            isinstance(ops.dtype, Dtype.__args__)
            and (
                ops.dtype == np.dtype("object")
                or as_spark_type(ops.dtype, raise_error=False) is not None
            )
            for ops in data_fields
        ), data_fields
        if is_testing():
            struct_fields = spark_frame.select(data_spark_columns).schema.fields
            assert all(
                data_field.struct_field == struct_field
                for data_field, struct_field in zip(data_fields, struct_fields)
            ), (data_fields, struct_fields)
        self._data_fields: List[InternalField] = data_fields
        if not index_names:
            index_names = [None] * len(index_spark_columns)
        assert len(index_spark_columns) == len(index_names), (
            len(index_spark_columns),
            len(index_names),
        )
        assert all(
            is_name_like_tuple(index_name, check_type=True) for index_name in index_names
        ), index_names
        self._index_names: List[Optional[Label]] = index_names
        if column_labels is None:
            column_labels = [(col,) for col in spark_frame.select(self._data_spark_columns).columns]
        else:
            assert len(column_labels) == len(self._data_spark_columns), (
                len(column_labels),
                len(self._data_spark_columns),
            )
            if len(column_labels) == 1:
                column_label = column_labels[0]
                assert is_name_like_tuple(column_label, check_type=True), column_label
            else:
                assert all(
                    is_name_like_tuple(column_label, check_type=True)
                    for column_label in column_labels
                ), column_labels
                # All labels must have the same number of levels.
                assert len(set(len(label) for label in column_labels)) <= 1, column_labels
        self._column_labels: List[Label] = column_labels
        if column_label_names is None:
            column_label_names = [None] * column_labels_level(self._column_labels)
        else:
            if len(self._column_labels) > 0:
                assert len(column_label_names) == column_labels_level(self._column_labels), (
                    len(column_label_names),
                    column_labels_level(self._column_labels),
                )
            else:
                assert len(column_label_names) > 0, len(column_label_names)
            assert all(
                is_name_like_tuple(column_label_name, check_type=True)
                for column_label_name in column_label_names
            ), column_label_names
        self._column_label_names: List[Optional[Label]] = column_label_names
    @staticmethod
    def attach_default_index(
        sdf: SparkDataFrame, default_index_type: Optional[str] = None
    ) -> SparkDataFrame:
        """
        Attach a default index column ("__index_level_0__") to the Spark DataFrame.

        The strategy comes from `default_index_type`, falling back to the
        "compute.default_index_type" option: "sequence", "distributed-sequence",
        or "distributed".

        :raises ValueError: if the resolved index type is not one of the three above.
        """
        index_column = SPARK_DEFAULT_INDEX_NAME
        assert (
            index_column not in sdf.columns
        ), "'%s' already exists in the Spark column names '%s'" % (index_column, sdf.columns)
        if default_index_type is None:
            default_index_type = ps.get_option("compute.default_index_type")
        if default_index_type == "sequence":
            return InternalFrame.attach_sequence_column(sdf, column_name=index_column)
        elif default_index_type == "distributed-sequence":
            return InternalFrame.attach_distributed_sequence_column(sdf, column_name=index_column)
        elif default_index_type == "distributed":
            return InternalFrame.attach_distributed_column(sdf, column_name=index_column)
        else:
            raise ValueError(
                "'compute.default_index_type' should be one of 'sequence',"
                " 'distributed-sequence' and 'distributed'"
            )
    @staticmethod
    def attach_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
        """Prepend a 0-based consecutive index via `row_number` over a global window.

        NOTE(review): the window is unpartitioned, which forces the data through a
        single partition — presumably acceptable only for this opt-in index type.
        """
        scols = [scol_for(sdf, column) for column in sdf.columns]
        sequential_index = (
            F.row_number().over(Window.orderBy(F.monotonically_increasing_id())).cast("long") - 1
        )
        return sdf.select(sequential_index.alias(column_name), *scols)
    @staticmethod
    def attach_distributed_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
        """Prepend a non-consecutive (but monotonically increasing) distributed index column."""
        scols = [scol_for(sdf, column) for column in sdf.columns]
        return sdf.select(F.monotonically_increasing_id().alias(column_name), *scols)
    @staticmethod
    def attach_distributed_sequence_column(sdf: SparkDataFrame, column_name: str) -> SparkDataFrame:
        """Prepend a consecutive index computed in a distributed manner.

        Delegates to the JVM-side `withSequenceColumn`; empty or zero-column frames
        are handled locally.
        """
        if len(sdf.columns) > 0:
            return SparkDataFrame(
                sdf._jdf.toDF().withSequenceColumn(column_name),
                sdf.sql_ctx,
            )
        else:
            cnt = sdf.count()
            if cnt > 0:
                return default_session().range(cnt).toDF(column_name)
            else:
                # Zero rows: build an empty frame with just the (non-null) index column.
                return default_session().createDataFrame(
                    [], schema=StructType().add(column_name, data_type=LongType(), nullable=False)
                )
    def spark_column_for(self, label: Label) -> Column:
        """Return the Spark Column for the given column label.

        :raises KeyError: if the label is not managed by this frame.
        """
        column_labels_to_scol = dict(zip(self.column_labels, self.data_spark_columns))
        if label in column_labels_to_scol:
            return column_labels_to_scol[label]
        else:
            raise KeyError(name_like_string(label))
    def spark_column_name_for(self, label_or_scol: Union[Label, Column]) -> str:
        """Return the Spark column name for the given label or Spark Column."""
        if isinstance(label_or_scol, Column):
            # Resolve the expression's output name through a projection.
            return self.spark_frame.select(label_or_scol).columns[0]
        else:
            return self.field_for(label_or_scol).name
    def spark_type_for(self, label_or_scol: Union[Label, Column]) -> DataType:
        """Return the Spark data type for the given label or Spark Column."""
        if isinstance(label_or_scol, Column):
            # Resolve the expression's type through a projection.
            return self.spark_frame.select(label_or_scol).schema[0].dataType
        else:
            return self.field_for(label_or_scol).spark_type
    def spark_column_nullable_for(self, label_or_scol: Union[Label, Column]) -> bool:
        """Return whether the given label or Spark Column is nullable."""
        if isinstance(label_or_scol, Column):
            return self.spark_frame.select(label_or_scol).schema[0].nullable
        else:
            return self.field_for(label_or_scol).nullable
    def field_for(self, label: Label) -> InternalField:
        """Return the InternalField for the given column label.

        :raises KeyError: if the label is not managed by this frame.
        """
        column_labels_to_fields = dict(zip(self.column_labels, self.data_fields))
        if label in column_labels_to_fields:
            return column_labels_to_fields[label]
        else:
            raise KeyError(name_like_string(label))
    @property
    def spark_frame(self) -> SparkDataFrame:
        """Return the managed Spark DataFrame."""
        return self._sdf
    @lazy_property
    def data_spark_column_names(self) -> List[str]:
        """Return the Spark column names for the managed data columns."""
        return [field.name for field in self.data_fields]
    @property
    def data_spark_columns(self) -> List[Column]:
        """Return the Spark Columns for the managed data columns."""
        return self._data_spark_columns
    @property
    def index_spark_column_names(self) -> List[str]:
        """Return the Spark column names for the managed index columns."""
        return [field.name for field in self.index_fields]
    @property
    def index_spark_columns(self) -> List[Column]:
        """Return the Spark Columns for the managed index columns."""
        return self._index_spark_columns
    @lazy_property
    def spark_column_names(self) -> List[str]:
        """Return all Spark column names (index first, then non-index data columns)."""
        return self.spark_frame.select(self.spark_columns).columns
    @lazy_property
    def spark_columns(self) -> List[Column]:
        """Return index Spark Columns followed by data Spark Columns, deduplicating
        data columns that are also index columns."""
        index_spark_columns = self.index_spark_columns
        return index_spark_columns + [
            spark_column
            for spark_column in self.data_spark_columns
            if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns)
        ]
    @property
    def index_names(self) -> List[Optional[Label]]:
        """Return the index level names (tuples, or None per level)."""
        return self._index_names
    @lazy_property
    def index_level(self) -> int:
        """Return the number of index levels."""
        return len(self._index_names)
    @property
    def column_labels(self) -> List[Label]:
        """Return the managed column labels (as tuples)."""
        return self._column_labels
    @lazy_property
    def column_labels_level(self) -> int:
        """Return the number of column index levels."""
        return len(self._column_label_names)
    @property
    def column_label_names(self) -> List[Optional[Label]]:
        """Return the names of the column index levels."""
        return self._column_label_names
    @property
    def index_fields(self) -> List[InternalField]:
        """Return InternalFields for the managed index columns."""
        return self._index_fields
    @property
    def data_fields(self) -> List[InternalField]:
        """Return InternalFields for the managed data columns."""
        return self._data_fields
    @lazy_property
    def to_internal_spark_frame(self) -> SparkDataFrame:
        """
        Return as Spark DataFrame. This contains index columns as well
        and should be only used for internal purposes.
        """
        index_spark_columns = self.index_spark_columns
        data_columns = []
        for spark_column in self.data_spark_columns:
            # Skip data columns that duplicate an index column.
            if all(not spark_column_equals(spark_column, scol) for scol in index_spark_columns):
                data_columns.append(spark_column)
        return self.spark_frame.select(index_spark_columns + data_columns)
    @lazy_property
    def to_pandas_frame(self) -> pd.DataFrame:
        """Collect this frame to the driver as a pandas DataFrame with the
        original index and column metadata restored."""
        sdf = self.to_internal_spark_frame
        pdf = sdf.toPandas()
        if len(pdf) == 0 and len(sdf.schema) > 0:
            # An empty toPandas() loses dtype information; re-apply dtypes
            # derived from the Spark schema so downstream code still sees them.
            pdf = pdf.astype(
                {field.name: spark_type_to_pandas_dtype(field.dataType) for field in sdf.schema}
            )
        return InternalFrame.restore_index(pdf, **self.arguments_for_restore_index)
    @lazy_property
    def arguments_for_restore_index(self) -> Dict:
        """Build the keyword arguments for :meth:`InternalFrame.restore_index`.

        A data column that aliases an index column is reported under the
        index column's Spark name and its field is not duplicated.
        """
        column_names = []
        fields = self.index_fields.copy()
        for spark_column, column_name, field in zip(
            self.data_spark_columns, self.data_spark_column_names, self.data_fields
        ):
            for index_spark_column_name, index_spark_column in zip(
                self.index_spark_column_names, self.index_spark_columns
            ):
                if spark_column_equals(spark_column, index_spark_column):
                    column_names.append(index_spark_column_name)
                    break
            else:
                # Not an alias of any index column: keep its own name/field.
                column_names.append(column_name)
                fields.append(field)
        return dict(
            index_columns=self.index_spark_column_names,
            index_names=self.index_names,
            data_columns=column_names,
            column_labels=self.column_labels,
            column_label_names=self.column_label_names,
            fields=fields,
        )
    @staticmethod
    def restore_index(
        pdf: pd.DataFrame,
        *,
        index_columns: List[str],
        index_names: List[Label],
        data_columns: List[str],
        column_labels: List[Label],
        column_label_names: List[Label],
        fields: Optional[List[InternalField]] = None,
    ) -> pd.DataFrame:
        """Restore index and column metadata on a freshly collected pandas frame.

        NOTE(review): despite the None default, `fields` is dereferenced
        unconditionally below (`zip(pdf.columns, fields)` raises TypeError on
        None) — callers are expected to always supply it; confirm before
        relying on the default.
        """
        # Convert each column back to its pandas-side representation.
        for col, field in zip(pdf.columns, fields):
            pdf[col] = DataTypeOps(field.dtype, field.spark_type).restore(pdf[col])
        append = False
        for index_field in index_columns:
            # Keep the column in the data part too when it doubles as data.
            drop = index_field not in data_columns
            pdf = pdf.set_index(index_field, drop=drop, append=append)
            append = True
        pdf = pdf[data_columns]
        # Unwrap single-element label tuples back to scalar names.
        pdf.index.names = [
            name if name is None or len(name) > 1 else name[0] for name in index_names
        ]
        names = [name if name is None or len(name) > 1 else name[0] for name in column_label_names]
        if len(column_label_names) > 1:
            pdf.columns = pd.MultiIndex.from_tuples(column_labels, names=names)
        else:
            pdf.columns = pd.Index(
                [None if label is None else label[0] for label in column_labels],
                name=names[0],
            )
        return pdf
    @lazy_property
    def resolved_copy(self) -> "InternalFrame":
        """Return a copy whose columns are re-resolved against a freshly
        selected Spark DataFrame (including the hidden helper columns)."""
        sdf = self.spark_frame.select(self.spark_columns + list(HIDDEN_COLUMNS))
        return self.copy(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
            data_spark_columns=[scol_for(sdf, col) for col in self.data_spark_column_names],
        )
    def with_new_sdf(
        self,
        spark_frame: SparkDataFrame,
        *,
        index_fields: Optional[List[InternalField]] = None,
        data_columns: Optional[List[str]] = None,
        data_fields: Optional[List[InternalField]] = None,
    ) -> "InternalFrame":
        """Copy this InternalFrame onto a new Spark DataFrame.

        Arguments left as None are carried over from the current frame;
        explicitly supplied ones must match the current frame's arity.
        """
        if index_fields is None:
            index_fields = self.index_fields
        else:
            assert len(index_fields) == len(self.index_fields), (
                len(index_fields),
                len(self.index_fields),
            )
        if data_columns is None:
            data_columns = self.data_spark_column_names
        else:
            assert len(data_columns) == len(self.column_labels), (
                len(data_columns),
                len(self.column_labels),
            )
        if data_fields is None:
            data_fields = self.data_fields
        else:
            assert len(data_fields) == len(self.column_labels), (
                len(data_fields),
                len(self.column_labels),
            )
        # The natural-order helper column belongs to the old frame only.
        sdf = spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
        return self.copy(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, col) for col in self.index_spark_column_names],
            index_fields=index_fields,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
            data_fields=data_fields,
        )
    def with_new_columns(
        self,
        scols_or_pssers: Sequence[Union[Column, "Series"]],
        *,
        column_labels: Optional[List[Label]] = None,
        data_fields: Optional[List[InternalField]] = None,
        column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
        keep_order: bool = True,
    ) -> "InternalFrame":
        """Copy this frame with the data columns replaced.

        `scols_or_pssers` may mix Spark Columns and pandas-on-Spark Series;
        labels and fields are taken from each Series when not supplied,
        while plain Columns fall back to the current labels / a None field.
        """
        from pyspark.pandas.series import Series
        if column_labels is None:
            if all(isinstance(scol_or_psser, Series) for scol_or_psser in scols_or_pssers):
                column_labels = [cast(Series, psser)._column_label for psser in scols_or_pssers]
            else:
                assert len(scols_or_pssers) == len(self.column_labels), (
                    len(scols_or_pssers),
                    len(self.column_labels),
                )
                column_labels = []
                for scol_or_psser, label in zip(scols_or_pssers, self.column_labels):
                    if isinstance(scol_or_psser, Series):
                        column_labels.append(scol_or_psser._column_label)
                    else:
                        column_labels.append(label)
        else:
            assert len(scols_or_pssers) == len(column_labels), (
                len(scols_or_pssers),
                len(column_labels),
            )
        data_spark_columns = []
        for scol_or_psser in scols_or_pssers:
            if isinstance(scol_or_psser, Series):
                scol = scol_or_psser.spark.column
            else:
                scol = scol_or_psser
            data_spark_columns.append(scol)
        if data_fields is None:
            data_fields = []
            for scol_or_psser in scols_or_pssers:
                if isinstance(scol_or_psser, Series):
                    data_fields.append(scol_or_psser._internal.data_fields[0])
                else:
                    # Metadata unknown for a bare Column.
                    data_fields.append(None)
        else:
            assert len(scols_or_pssers) == len(data_fields), (
                len(scols_or_pssers),
                len(data_fields),
            )
        sdf = self.spark_frame
        if not keep_order:
            # Re-select and re-resolve the columns against the new frame.
            sdf = self.spark_frame.select(self.index_spark_columns + data_spark_columns)
            index_spark_columns = [scol_for(sdf, col) for col in self.index_spark_column_names]
            data_spark_columns = [
                scol_for(sdf, col) for col in self.spark_frame.select(data_spark_columns).columns
            ]
        else:
            index_spark_columns = self.index_spark_columns
        if column_label_names is _NoValue:
            column_label_names = self._column_label_names
        return self.copy(
            spark_frame=sdf,
            index_spark_columns=index_spark_columns,
            column_labels=column_labels,
            data_spark_columns=data_spark_columns,
            data_fields=data_fields,
            column_label_names=column_label_names,
        )
    def with_filter(self, pred: Union[Column, "Series"]) -> "InternalFrame":
        """Copy this frame filtered by a boolean Column or Series predicate."""
        from pyspark.pandas.series import Series
        if isinstance(pred, Series):
            assert isinstance(pred.spark.data_type, BooleanType), pred.spark.data_type
            condition = pred.spark.column
        else:
            condition = pred
            # Validate that an arbitrary Column really evaluates to boolean.
            spark_type = self.spark_frame.select(condition).schema[0].dataType
            assert isinstance(spark_type, BooleanType), spark_type
        return self.with_new_sdf(self.spark_frame.filter(condition).select(self.spark_columns))
def with_new_spark_column(
self,
column_label: Label,
scol: Column,
*,
field: Optional[InternalField] = None,
keep_order: bool = True,
) -> "InternalFrame":
assert column_label in self.column_labels, column_label
idx = self.column_labels.index(column_label)
data_spark_columns = self.data_spark_columns.copy()
data_spark_columns[idx] = scol
data_fields = self.data_fields.copy()
data_fields[idx] = field
return self.with_new_columns(
data_spark_columns, data_fields=data_fields, keep_order=keep_order
)
    def select_column(self, column_label: Label) -> "InternalFrame":
        """Copy this frame narrowed down to a single data column."""
        assert column_label in self.column_labels, column_label
        return self.copy(
            column_labels=[column_label],
            data_spark_columns=[self.spark_column_for(column_label)],
            data_fields=[self.field_for(column_label)],
            column_label_names=None,
        )
    def copy(
        self,
        *,
        spark_frame: Union[SparkDataFrame, _NoValueType] = _NoValue,
        index_spark_columns: Union[List[Column], _NoValueType] = _NoValue,
        index_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
        index_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
        column_labels: Union[Optional[List[Label]], _NoValueType] = _NoValue,
        data_spark_columns: Union[Optional[List[Column]], _NoValueType] = _NoValue,
        data_fields: Union[Optional[List[InternalField]], _NoValueType] = _NoValue,
        column_label_names: Union[Optional[List[Optional[Label]]], _NoValueType] = _NoValue,
    ) -> "InternalFrame":
        """Return a new InternalFrame, overriding only the given components.

        `_NoValue` (not None) is the "keep the current value" sentinel,
        because None is itself a meaningful argument for several parameters.
        """
        if spark_frame is _NoValue:
            spark_frame = self.spark_frame
        if index_spark_columns is _NoValue:
            index_spark_columns = self.index_spark_columns
        if index_names is _NoValue:
            index_names = self.index_names
        if index_fields is _NoValue:
            index_fields = self.index_fields
        if column_labels is _NoValue:
            column_labels = self.column_labels
        if data_spark_columns is _NoValue:
            data_spark_columns = self.data_spark_columns
        if data_fields is _NoValue:
            data_fields = self.data_fields
        if column_label_names is _NoValue:
            column_label_names = self.column_label_names
        return InternalFrame(
            spark_frame=cast(SparkDataFrame, spark_frame),
            index_spark_columns=cast(List[Column], index_spark_columns),
            index_names=cast(Optional[List[Optional[Label]]], index_names),
            index_fields=cast(Optional[List[InternalField]], index_fields),
            column_labels=cast(Optional[List[Label]], column_labels),
            data_spark_columns=cast(Optional[List[Column]], data_spark_columns),
            data_fields=cast(Optional[List[InternalField]], data_fields),
            column_label_names=cast(Optional[List[Optional[Label]]], column_label_names),
        )
    @staticmethod
    def from_pandas(pdf: pd.DataFrame) -> "InternalFrame":
        """Create an InternalFrame from a pandas DataFrame.

        Index and column labels are normalized to tuples, and the data is
        shipped to a Spark DataFrame under an explicitly inferred schema.
        """
        # Normalize index-level names to tuples (or None).
        index_names: List[Optional[Label]] = [
            name if name is None or isinstance(name, tuple) else (name,) for name in pdf.index.names
        ]
        columns = pdf.columns
        column_labels: List[Label]
        if isinstance(columns, pd.MultiIndex):
            column_labels = columns.tolist()
        else:
            column_labels = [(col,) for col in columns]
        column_label_names: List[Optional[Label]] = [
            name if name is None or isinstance(name, tuple) else (name,) for name in columns.names
        ]
        prefer_timestamp_ntz = is_timestamp_ntz_preferred()
        (
            pdf,
            index_columns,
            index_fields,
            data_columns,
            data_fields,
        ) = InternalFrame.prepare_pandas_frame(pdf, prefer_timestamp_ntz=prefer_timestamp_ntz)
        schema = StructType([field.struct_field for field in index_fields + data_fields])
        sdf = default_session().createDataFrame(pdf, schema=schema)
        return InternalFrame(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, col) for col in index_columns],
            index_names=index_names,
            index_fields=index_fields,
            column_labels=column_labels,
            data_spark_columns=[scol_for(sdf, col) for col in data_columns],
            data_fields=data_fields,
            column_label_names=column_label_names,
        )
    @staticmethod
    def prepare_pandas_frame(
        pdf: pd.DataFrame, *, retain_index: bool = True, prefer_timestamp_ntz: bool = False
    ) -> Tuple[pd.DataFrame, List[str], List[InternalField], List[str], List[InternalField]]:
        """Flatten a pandas DataFrame for conversion to Spark.

        Returns the reset-index frame together with the index/data column
        names and their InternalFields.
        """
        pdf = pdf.copy()
        data_columns = [name_like_string(col) for col in pdf.columns]
        pdf.columns = data_columns
        if retain_index:
            # Materialize index levels as regular columns with synthetic names.
            index_nlevels = pdf.index.nlevels
            index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(index_nlevels)]
            pdf.index.names = index_columns
            reset_index = pdf.reset_index()
        else:
            index_nlevels = 0
            index_columns = []
            reset_index = pdf
        index_dtypes = list(reset_index.dtypes)[:index_nlevels]
        data_dtypes = list(reset_index.dtypes)[index_nlevels:]
        for col, dtype in zip(reset_index.columns, reset_index.dtypes):
            spark_type = infer_pd_series_spark_type(reset_index[col], dtype, prefer_timestamp_ntz)
            reset_index[col] = DataTypeOps(dtype, spark_type).prepare(reset_index[col])
        # NOTE(review): the Spark type is re-inferred below on the *prepared*
        # column instead of reusing `spark_type` — confirm that is intentional.
        # NOTE(review): DataFrame.iteritems was removed in pandas 2.0; switch
        # to .items() when upgrading pandas.
        fields = [
            InternalField(
                dtype=dtype,
                struct_field=StructField(
                    name=str(name),
                    dataType=infer_pd_series_spark_type(col, dtype, prefer_timestamp_ntz),
                    nullable=bool(col.isnull().any()),
                ),
            )
            for (name, col), dtype in zip(reset_index.iteritems(), index_dtypes + data_dtypes)
        ]
        return (
            reset_index,
            index_columns,
            fields[:index_nlevels],
            data_columns,
            fields[index_nlevels:],
        )
def _test() -> None:
    """Run this module's doctests against a local 4-core Spark session."""
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.internal
    # Doctests expect to run from the Spark source root.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.internal.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]")
        .appName("pyspark.pandas.internal tests")
        .getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.internal,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| true | true |
1c32c4bd87d48b8ecbc4afc1533f653fe99d2f0f | 3,387 | py | Python | exot/util/analysers.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | exot/util/analysers.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | exot/util/analysers.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Misc helpers"""
import numpy as np
import pandas as pd
from exot.util.attributedict import LabelMapping
def all_labels_in_dataset(experiment, sort_str=False, **kwargs):
    """Collect the unique label values seen across all runs of a phase.

    :param experiment: exot experiment providing ``config`` and ``phases``
    :param sort_str: if True, return the labels converted to strings via
                     :func:`sort_str_labels`
    :param kwargs: ingestion arguments; must contain the keys
                   ``phase``, ``labelcolumn`` and ``io``
    :returns: numpy array of unique labels
    :raises ValueError: if a required ingestion key is missing
    """
    # Fail fast when a required ingestion argument is missing.
    for key in ["phase", "labelcolumn", "io"]:
        if key not in kwargs:
            raise ValueError(f"key {key} not specified in kwargs!")
    ingest_args = kwargs.copy()
    labels = np.array([])
    for rep in range(experiment.config.EXPERIMENT.PHASES[ingest_args["phase"]].repetitions):
        ingest_args["io"]["rep"] = rep
        for cur_run in experiment.phases[ingest_args["phase"]].values():
            try:
                cur_run.ingest(**ingest_args)
                labels = np.concatenate(
                    (labels, cur_run.i_rawstream[ingest_args["labelcolumn"]].unique())
                )
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; ingestion stays best-effort otherwise.
            except Exception:
                print("Could not ingest run", cur_run)
    labels = pd.DataFrame(labels)[0].unique().flatten()
    if sort_str:
        # BUG FIX: was `_sort_str_labels`, which is undefined (NameError);
        # the helper defined in this module is `sort_str_labels`.
        return sort_str_labels(labels)
    else:
        return labels
def sort_str_labels(labels):
    """Return the labels as a numpy array of their string representations.

    Note: despite the name, no sorting is performed — only conversion.
    """
    return np.array([str(label) for label in labels])
def generate_unique_labels_mapping(labels_keys, labels_str):
    """Build a LabelMapping from label keys to ``{"int": ..., "str": ...}``.

    :param labels_keys: sequence of label keys (deduplicated via ``set``)
    :param labels_str: optional indexable of string representations; when
                       None the string form is ``str(labels_keys[x])``
    :returns: a :class:`LabelMapping`

    NOTE(review): ``x`` enumerates the *set* of keys but indexes the original
    ``labels_keys``/``labels_str`` sequences — behavior preserved from the
    original; confirm this is intended when the input has duplicates.
    """
    if labels_str is None:
        # BUG FIX: the original dereferenced `labels_str.keys()` in this
        # branch even though it is only taken when labels_str is None,
        # raising AttributeError; the loop was also dead code because the
        # mapping below never reads labels_str here.
        labels_map = dict(
            (y, {"int": x, "str": str(labels_keys[x])})
            for x, y in enumerate(set(labels_keys))
        )
    else:
        labels_map = dict(
            (y, {"int": x, "str": labels_str[x]}) for x, y in enumerate(set(labels_keys))
        )
    return LabelMapping(labels_map)
| 40.807229 | 92 | 0.679658 |
import numpy as np
import pandas as pd
from exot.util.attributedict import LabelMapping
def all_labels_in_dataset(experiment, sort_str=False, **kwargs):
    """Collect the unique label values seen across all runs of a phase.

    :param experiment: exot experiment providing ``config`` and ``phases``
    :param sort_str: if True, return the labels converted to strings via
                     :func:`sort_str_labels`
    :param kwargs: ingestion arguments; must contain the keys
                   ``phase``, ``labelcolumn`` and ``io``
    :returns: numpy array of unique labels
    :raises ValueError: if a required ingestion key is missing
    """
    # Fail fast when a required ingestion argument is missing.
    for key in ["phase", "labelcolumn", "io"]:
        if key not in kwargs:
            raise ValueError(f"key {key} not specified in kwargs!")
    ingest_args = kwargs.copy()
    labels = np.array([])
    for rep in range(experiment.config.EXPERIMENT.PHASES[ingest_args["phase"]].repetitions):
        ingest_args["io"]["rep"] = rep
        for cur_run in experiment.phases[ingest_args["phase"]].values():
            try:
                cur_run.ingest(**ingest_args)
                labels = np.concatenate(
                    (labels, cur_run.i_rawstream[ingest_args["labelcolumn"]].unique())
                )
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; ingestion stays best-effort otherwise.
            except Exception:
                print("Could not ingest run", cur_run)
    labels = pd.DataFrame(labels)[0].unique().flatten()
    if sort_str:
        # BUG FIX: was `_sort_str_labels`, which is undefined (NameError);
        # the helper defined in this module is `sort_str_labels`.
        return sort_str_labels(labels)
    else:
        return labels
def sort_str_labels(labels):
    """Return the labels as a numpy array of their string representations.

    Note: despite the name, no sorting is performed — only conversion.
    """
    return np.array([str(label) for label in labels])
def generate_unique_labels_mapping(labels_keys, labels_str):
    """Build a LabelMapping from label keys to ``{"int": ..., "str": ...}``.

    :param labels_keys: sequence of label keys (deduplicated via ``set``)
    :param labels_str: optional indexable of string representations; when
                       None the string form is ``str(labels_keys[x])``
    :returns: a :class:`LabelMapping`
    """
    if labels_str is None:
        # BUG FIX: the original dereferenced `labels_str.keys()` in this
        # branch even though it is only taken when labels_str is None,
        # raising AttributeError; the loop was also dead code.
        labels_map = dict(
            (y, {"int": x, "str": str(labels_keys[x])})
            for x, y in enumerate(set(labels_keys))
        )
    else:
        labels_map = dict(
            (y, {"int": x, "str": labels_str[x]}) for x, y in enumerate(set(labels_keys))
        )
    return LabelMapping(labels_map)
| true | true |
1c32c5bf3ef61e16be2fe366d916eb5e61b98c5c | 7,985 | py | Python | docs/conf.py | tony/sphinx-argparse | dcf53c8092e87e375388f8691a28254fbbd7ac24 | [
"MIT"
] | null | null | null | docs/conf.py | tony/sphinx-argparse | dcf53c8092e87e375388f8691a28254fbbd7ac24 | [
"MIT"
] | null | null | null | docs/conf.py | tony/sphinx-argparse | dcf53c8092e87e375388f8691a28254fbbd7ac24 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# sphinx-argparse documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 31 15:13:43 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add it to sys.path here; relative paths are made absolute
# with os.path.abspath so autodoc can import the package.
import os
import sys

sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinx-argparse'
copyright = u'2013, Alex Rudakov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata.version("sphinx-argparse") is the stdlib replacement.
import pkg_resources  # part of setuptools
version = pkg_resources.require("sphinx-argparse")[0].version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxargparsedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinx-argparse.tex', u'sphinx-argparse Documentation',
u'Alex Rudakov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx-argparse', u'sphinx-argparse Documentation',
[u'Alex Rudakov'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinx-argparse', u'sphinx-argparse Documentation',
u'Alex Rudakov', 'sphinx-argparse', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.45935 | 100 | 0.717721 |
import sys, os
sys.path.insert(0, os.path.abspath('..'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'sphinx-argparse'
copyright = u'2013, Alex Rudakov'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pkg_resources # part of setuptools
version = pkg_resources.require("sphinx-argparse")[0].version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxargparsedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinx-argparse.tex', u'sphinx-argparse Documentation',
u'Alex Rudakov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx-argparse', u'sphinx-argparse Documentation',
[u'Alex Rudakov'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinx-argparse', u'sphinx-argparse Documentation',
u'Alex Rudakov', 'sphinx-argparse', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| true | true |
1c32c6c2808bf6c3550db9b366434945d7d62c09 | 3,371 | py | Python | kin.py | yuri-almeid/Snackin-Assistant | 750aa6c0b29a810ee5d3fb85a24ccc5a384e25b0 | [
"Apache-2.0"
] | 1 | 2021-06-12T13:43:39.000Z | 2021-06-12T13:43:39.000Z | kin.py | yuri-almeid/Snackin-Assistant | 750aa6c0b29a810ee5d3fb85a24ccc5a384e25b0 | [
"Apache-2.0"
] | null | null | null | kin.py | yuri-almeid/Snackin-Assistant | 750aa6c0b29a810ee5d3fb85a24ccc5a384e25b0 | [
"Apache-2.0"
] | null | null | null |
## Importando Módulos
from ibm_watson import TextToSpeechV1 # Biblioteca IBM Watson para TTS
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator # Biblioteca IBM Watson para autenticação da API
from datetime import datetime # módulo de tempo
from datetime import date # Módulo de tempo (data)
import random
import os
def IBM_auth():
    """Authenticate against the IBM Watson Text-to-Speech REST API.

    Returns:
        TextToSpeechV1: a client bound to the configured service URL.

    Note: on failure this prints an error and terminates the process
    (historical behavior, kept so existing callers are unaffected).
    """
    print(" >>> Iniciando autenticação do IBM Watson...")
    print()
    ## Configure the IBM Watson API key.
    # SECURITY: the API key used to be hard-coded. It is now read from the
    # environment first; the literal remains only as a backward-compatible
    # fallback and should be rotated and removed.
    apikey = os.environ.get('IBM_TTS_APIKEY',
                            'wu9NYkYxx6jzVQrvrdRK3rCQk5et-VDTSpApnG9dDG2e')
    url = os.environ.get(
        'IBM_TTS_URL',
        'https://api.us-south.text-to-speech.watson.cloud.ibm.com/instances/7583b6cf-bd90-4057-ac04-54fc91ed5c0e')
    # NOTE(review): this echoes the secret to stdout; kept for behavior
    # compatibility, but consider removing it.
    print(" >>> Chave: " + apikey )
    # Validate the key by constructing an authenticated client.
    try:
        authenticator = IAMAuthenticator(apikey)
        tts = TextToSpeechV1(authenticator=authenticator)
        tts.set_service_url(url)
        return tts
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        print(">> Ocorreu um erro na autenticação")
        exit()
## TTS
# Funçao que reproduz texto falado
def say(text, tts):
    """Synthesize ``text`` with Watson TTS and play the resulting MP3."""
    # Request Brazilian-Portuguese speech and persist it to disk.
    result = tts.synthesize(text, accept='audio/mp3',
                            voice='pt-BR_IsabelaV3Voice').get_result()
    with open('speech.mp3', 'wb') as audio_file:
        audio_file.write(result.content)
    # Play the generated file through the system helper script.
    os.system('./say.sh')
def KIN(user_):
    """Greet a customer by name, log the visit data and speak the greeting.

    Args:
        user_: customer identifier; may arrive wrapped in ``{}`` braces and
            quotes (e.g. the repr of a dict entry), which are stripped here.

    Returns:
        True once the greeting has been spoken.
    """
    # Canned greeting complements (Portuguese, user-facing).
    c_greeting = ['como vai?', 'como tem passado?', 'que bom te ver aqui!', 'fico feliz em te ver aqui',
                  'desejo boas compras!', 'te desejo uma boa experiência Snackin']
    c_weekend = ['Aproveite o seu final de semana!',
                 'desejamos um excelente final de semana!']

    tts = IBM_auth()

    # Visit-log skeleton.
    log = {'nome': '',
           'data': {'data': '',
                    'hora': '',
                    'dia': ''}}

    # Strip dict-repr artifacts from the incoming identifier.
    # BUGFIX: the sanitized value was previously computed and then
    # discarded (the raw ``user_`` was used as the name); it is now used.
    name = str(user_).replace("{", "").replace("}", "").replace("'", "")
    log['nome'] = name
    spc = ' '  # separator used when assembling the message

    # Timestamp of the visit.
    now = datetime.now()
    day = datetime.today().strftime('%A')

    # Save date, time and weekday.
    # BUGFIX: minutes are now zero-padded (was "9:5" for 09:05).
    log['data']['data'] = '%d/%d/%d' % (now.day, now.month, now.year)
    log['data']['hora'] = '%d:%02d' % (now.hour, now.minute)
    log['data']['dia'] = day

    # Pick the salutation matching the time of day.
    if 4 <= now.hour < 12:
        greeting = 'Bom dia,'
    elif 12 <= now.hour < 19:
        greeting = 'Boa tarde,'
    else:
        greeting = 'Boa noite,'

    # Salutation + customer name + a random complement.
    msg = greeting + spc + name + ',' + spc + random.choice(c_greeting)

    # Extra message for the weekend.
    if day == 'Friday' or day == 'Saturday':
        msg = msg + ', ' + random.choice(c_weekend)

    log['Mensagem'] = msg
    # TODO(review): return/emit this as a JSON response instead of printing.
    print(log)

    # Requires: sudo apt-get install sox libsox-fmt-all
    say(msg, tts)
    return True
| 29.831858 | 113 | 0.613171 |
ort TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from datetime import datetime
from datetime import date
import random
import os
def IBM_auth():
print(" >>> Iniciando autenticação do IBM Watson...")
print()
CQk5et-VDTSpApnG9dDG2e'
url = 'https://api.us-south.text-to-speech.watson.cloud.ibm.com/instances/7583b6cf-bd90-4057-ac04-54fc91ed5c0e'
print(" >>> Chave: " + apikey )
try:
authenticator = IAMAuthenticator(apikey)
tts = TextToSpeechV1(authenticator=authenticator)
tts.set_service_url(url)
return tts
except :
print(">> Ocorreu um erro na autenticação")
exit()
say(text, tts):
with open('speech.mp3', 'wb') as audio_file:
res = tts.synthesize(text, accept='audio/mp3', voice='pt-BR_IsabelaV3Voice').get_result()
audio_file.write(res.content)
os.system('./say.sh')
def KIN(user_):
c_greeting = ['como vai?', 'como tem passado?', 'que bom te ver aqui!', 'fico feliz em te ver aqui',
'desejo boas compras!', 'te desejo uma boa experiência Snackin']
c_weekend = ['Aproveite o seu final de semana!',
'desejamos um excelente final de semana!']
tts = IBM_auth()
log = {'nome': '',
'data': {'data': '',
'hora': '',
'dia': ''}}
user = str(user_)
user = user.replace("{","")
user = user.replace("}","")
user = user.replace("'","")
# Pega o nome e localização do cliente
name = user_
log['nome'] = name
spc = ' ' # apenas uma variável facilitatória
# Pega a hora exata
now = datetime.now()
day = datetime.today().strftime('%A')
# Inicia contagem de tempo de permanência na loja
timer_start = now.minute
# Salva data
log['data']['data'] = str(now.day) + '/' + str(now.month) + '/' + str(now.year)
# Salva hora
log['data']['hora'] = str(now.hour) + ':' + str(now.minute)
# Salva dia da semana
log['data']['dia'] = day
# Escolha a saudação correta para o horário
if int(now.hour) >= 4 and int(now.hour) < 12:
greating = 'Bom dia,'
elif int(now.hour) >= 12 and int(now.hour) < 19:
greating = 'Boa tarde,'
else:
greating = 'Boa noite,'
# Concatena a saudação com o nome do cliente e um complemento inicial
msg = greating + spc + name + ',' + spc + random.choice(c_greeting)
# Mensagem extra para final de semana (sextou)
if day == 'Friday' or day == 'Saturday':
msg = msg + ', ' + random.choice(c_weekend)
log['Mensagem'] = msg
# Colocar json response
print(log)
# sudo apt-get install sox
# sudo apt-get install sox libsox-fmt-all
say(msg, tts)
return True
| true | true |
1c32c6ccaac73c2b146765c6a92a1e99e7b49e0f | 4,484 | py | Python | windcraft/text.py | marchdf/windcraft | 9334864171f10c7a2fa1f0da71938551cc45465a | [
"Apache-2.0"
] | null | null | null | windcraft/text.py | marchdf/windcraft | 9334864171f10c7a2fa1f0da71938551cc45465a | [
"Apache-2.0"
] | 2 | 2018-03-12T23:41:26.000Z | 2018-03-16T16:19:28.000Z | windcraft/text.py | marchdf/windcraft | 9334864171f10c7a2fa1f0da71938551cc45465a | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 National Renewable Energy Laboratory. This software
# is released under the license detailed in the file, LICENSE, which
# is located in the top-level directory structure.
# ========================================================================
#
# Imports
#
# ========================================================================
import pygame
import windcraft.colors as colors
import windcraft.fonts as fonts
# ========================================================================
#
# Class definitions
#
# ========================================================================
class Text():
    """Renders the HUD text (title, power read-out, key help) on screen."""

    def __init__(self):
        """Cache color/font helpers and the vertical line spacing."""
        self.colors = colors.Colors()
        self.fonts = fonts.Fonts()
        # Vertical offset between stacked help lines (3% of screen height).
        self.yoffset = 0.03 * pygame.display.get_surface().get_height()

    def display(self, screen, num_turbines, max_turbines, power):
        """Display the text on the screen.

        :param screen: pygame screen
        :type screen: screen
        :param num_turbines: number of turbines in farm (currently unused,
            kept for interface compatibility)
        :type num_turbines: int
        :param max_turbines: maximum number of turbines in farm (currently
            unused, kept for interface compatibility)
        :type max_turbines: int
        :param power: power generated by the farm
        :type power: float
        """
        # Hoisted: the surface dimensions were re-queried for every label.
        width = pygame.display.get_surface().get_width()
        height = pygame.display.get_surface().get_height()

        # Title, centered near the top.
        text = self.fonts.types['large'].render(
            "Build turbines to maximize power!", True, self.colors.black)
        screen.blit(text, text.get_rect(centerx=0.5 * width,
                                        top=0.04 * height))

        # Power read-out. The scaling factor converts the simulation's
        # power value to kW for display.
        scaling_factor = 100
        text = self.fonts.types['medium'].render(
            "Power produced: {0:.2f} kW".format(scaling_factor * power),
            True,
            self.colors.black)
        screen.blit(text, text.get_rect(centerx=0.5 * width,
                                        top=0.15 * height))

        # Key-binding help: right-aligned near the bottom, one line per
        # instruction, each shifted down by yoffset. (Replaces four
        # near-identical copy/pasted render stanzas.)
        xstart = 0.97 * width
        ystart = 0.9 * height
        instructions = ["[t] to toggle",
                        "[u] to undo ",
                        "[r] to reset ",
                        "[q] to quit "]
        for idx, instruction in enumerate(instructions):
            text = self.fonts.types['medium'].render(instruction,
                                                     True,
                                                     self.colors.black)
            textpos = text.get_rect(right=xstart,
                                    bottom=ystart)
            screen.blit(text, [textpos[0], textpos[1] + idx * self.yoffset])
| 41.906542 | 86 | 0.48595 |
import pygame
import windcraft.colors as colors
import windcraft.fonts as fonts
class Text():
def __init__(self):
self.colors = colors.Colors()
self.fonts = fonts.Fonts()
self.yoffset = 0.03 * pygame.display.get_surface().get_height()
def display(self, screen, num_turbines, max_turbines, power):
xstart = 0.5 * pygame.display.get_surface().get_width()
ystart = 0.04 * pygame.display.get_surface().get_height()
text = self.fonts.types['large'].render(
"Build turbines to maximize power!", True, self.colors.black)
textpos = text.get_rect(centerx=xstart,
top=ystart)
screen.blit(text, textpos)
xstart = 0.5 * pygame.display.get_surface().get_width()
ystart = 0.15 * pygame.display.get_surface().get_height()
scaling_factor = 100
text = self.fonts.types['medium'].render(
"Power produced: {0:.2f} kW".format(scaling_factor * power),
True,
self.colors.black)
textpos = text.get_rect(centerx=xstart,
top=ystart)
screen.blit(text, textpos)
xstart = 0.97 * pygame.display.get_surface().get_width()
ystart = 0.9 * pygame.display.get_surface().get_height()
text = self.fonts.types['medium'].render("[t] to toggle",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, textpos)
text = self.fonts.types['medium'].render("[u] to undo ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + self.yoffset])
text = self.fonts.types['medium'].render("[r] to reset ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + 2 * self.yoffset])
text = self.fonts.types['medium'].render("[q] to quit ",
True,
self.colors.black)
textpos = text.get_rect(right=xstart,
bottom=ystart)
screen.blit(text, [textpos[0], textpos[1] + 3 * self.yoffset])
| true | true |
1c32c6ed3176ab052d74abbd5c969a01cd06cee4 | 10,605 | py | Python | test/test_provider_notification_subscriptions_api.py | vericred/vericred-python | be45691c821a595c3d77a561b2ca33049b1239b4 | [
"Apache-2.0"
] | 3 | 2016-08-10T23:39:11.000Z | 2021-08-25T02:39:39.000Z | test/test_provider_notification_subscriptions_api.py | vericred/vericred-python | be45691c821a595c3d77a561b2ca33049b1239b4 | [
"Apache-2.0"
] | 2 | 2016-05-27T12:44:08.000Z | 2016-08-24T18:19:36.000Z | test/test_provider_notification_subscriptions_api.py | vericred/vericred-python | be45691c821a595c3d77a561b2ca33049b1239b4 | [
"Apache-2.0"
] | 4 | 2016-05-27T08:18:14.000Z | 2021-08-25T02:41:18.000Z | # coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.provider_notification_subscriptions_api import ProviderNotificationSubscriptionsApi
class TestProviderNotificationSubscriptionsApi(unittest.TestCase):
    """Unit-test stubs for ProviderNotificationSubscriptionsApi."""

    def setUp(self):
        # Fresh API client for every test case.
        self.api = vericred_client.apis.provider_notification_subscriptions_api.ProviderNotificationSubscriptionsApi()

    def tearDown(self):
        # No per-test cleanup required yet.
        pass

    def test_create_provider_notification_subscription(self):
        """Stub for create_provider_notification_subscription (Subscribe)."""
        pass

    def test_delete_provider_notification_subscription(self):
        """Stub for delete_provider_notification_subscription (Unsubscribe)."""
        pass

    def test_notify_provider_notification_subscription(self):
        """Stub for notify_provider_notification_subscription (Webhook)."""
        pass


if __name__ == '__main__':
    unittest.main()
| 39.570896 | 228 | 0.671759 |
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.provider_notification_subscriptions_api import ProviderNotificationSubscriptionsApi
class TestProviderNotificationSubscriptionsApi(unittest.TestCase):
def setUp(self):
self.api = vericred_client.apis.provider_notification_subscriptions_api.ProviderNotificationSubscriptionsApi()
def tearDown(self):
pass
def test_create_provider_notification_subscription(self):
pass
def test_delete_provider_notification_subscription(self):
pass
def test_notify_provider_notification_subscription(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c32c7f89a9fed5d1178980ccfdbc998795f0171 | 1,592 | py | Python | lib/surface/ml_engine/jobs/describe.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/ml_engine/jobs/describe.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/ml_engine/jobs/describe.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml-engine jobs describe command."""
from googlecloudsdk.api_lib.ml_engine import jobs
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import jobs_util
from googlecloudsdk.core import log
def _AddDescribeArgs(parser):
  # Register the positional job name and the --summarize flag on *parser*.
  flags.JOB_NAME.AddToParser(parser)
  flags.GetSummarizeFlag().AddToParser(parser)
class Describe(base.DescribeCommand):
  """Describe a Cloud ML Engine job."""

  @staticmethod
  def Args(parser):
    _AddDescribeArgs(parser)

  def Run(self, args):
    # Stash the job on the instance so Epilog can print the follow-up hint.
    self.job = jobs_util.Describe(jobs.JobsClient(), args.job)
    if not args.summarize:
      return self.job
    if args.format:
      log.warn('--format is ignored when --summarize is present')
    args.format = jobs_util.GetSummaryFormat(self.job)
    return self.job

  def Epilog(self, resources_were_displayed):
    if resources_were_displayed:
      jobs_util.PrintDescribeFollowUp(self.job.jobId)
| 33.87234 | 74 | 0.759422 |
from googlecloudsdk.api_lib.ml_engine import jobs
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import jobs_util
from googlecloudsdk.core import log
def _AddDescribeArgs(parser):
flags.JOB_NAME.AddToParser(parser)
flags.GetSummarizeFlag().AddToParser(parser)
class Describe(base.DescribeCommand):
@staticmethod
def Args(parser):
_AddDescribeArgs(parser)
def Run(self, args):
job = jobs_util.Describe(jobs.JobsClient(), args.job)
self.job = job
if args.summarize:
if args.format:
log.warn('--format is ignored when --summarize is present')
args.format = jobs_util.GetSummaryFormat(job)
return job
def Epilog(self, resources_were_displayed):
if resources_were_displayed:
jobs_util.PrintDescribeFollowUp(self.job.jobId)
| true | true |
1c32c88929975fdde3db41aa931defc525144752 | 4,526 | py | Python | phyutil/phyapp/common/tornado/auth/crowd.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | 1 | 2018-03-22T15:18:54.000Z | 2018-03-22T15:18:54.000Z | phyutil/phyapp/common/tornado/auth/crowd.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | null | null | null | phyutil/phyapp/common/tornado/auth/crowd.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | null | null | null | # encoding: UTF-8
#
# Copyright (c) 2015, Facility for Rare Isotope Beams
#
#
"""
Implements authentication support using the Atlassian Crowd service RESTful API.
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
from tornado.gen import coroutine
from tornado.gen import Return
from tornado.httputil import HTTPHeaders
from tornado.httpclient import HTTPError
from tornado.httpclient import HTTPRequest
from tornado.httpclient import AsyncHTTPClient
from tornado.escape import url_escape
from tornado.escape import json_decode
from tornado.escape import json_encode
from . import AuthProvider
from . import AuthUser
_LOGGER = logging.getLogger(__name__)
class CrowdAuthProvider(AuthProvider):
    """
    AuthProvider implementation supporting an Atlassian Crowd server
    through its REST API.

    The following application settings are used to configure this provider
    (the previous docstring listed them with a wrong prefix; these match
    the actual ``_CONFIG_*`` constants below):

    *auth_provider_crowd_url*: root URL of the Crowd API server
    *auth_provider_crowd_username*: username to authenticate with Crowd API
    *auth_provider_crowd_password*: password to authenticate with Crowd API
    """

    # Crowd's user-authentication endpoint, relative to the root URL.
    _CROWD_AUTH_URL = "/rest/usermanagement/1/authentication"

    # Application-settings keys.
    _CONFIG_CROWD_URL = "auth_provider_crowd_url"
    _CONFIG_CROWD_USERNAME = "auth_provider_crowd_username"
    _CONFIG_CROWD_PASSWORD = "auth_provider_crowd_password"

    def __init__(self, application):
        super(CrowdAuthProvider, self).__init__(application)

        settings = self.application.settings
        # Required settings; a missing one is a deployment error.
        self._crowd_url = self._require_setting(settings,
                                                self._CONFIG_CROWD_URL)
        self._crowd_username = self._require_setting(settings,
                                                     self._CONFIG_CROWD_USERNAME)
        self._crowd_password = self._require_setting(settings,
                                                     self._CONFIG_CROWD_PASSWORD)

        # Headers shared by all Crowd requests (session cookies are added
        # here after a successful authentication).
        self._crowd_headers = HTTPHeaders({
            "Accept": "application/json",
            "Content-Type": "application/json"
        })

        self._client = AsyncHTTPClient()

    @staticmethod
    def _require_setting(settings, name):
        """Return ``settings[name]`` or raise RuntimeError if missing."""
        if name not in settings:
            raise RuntimeError("Settings '" + name + "' not found")
        return settings.get(name)

    @coroutine
    def authenticate(self, username, password):
        """
        Authenticate a user using the Crowd service API.

        :returns: a Future that resolves to None (authentication failed)
                  or a valid AuthUser object.
        """
        auth_url = (self._crowd_url + self._CROWD_AUTH_URL
                    + "?username=" + url_escape(username))
        auth_body = {"value": password}

        request = HTTPRequest(auth_url,
                              method="POST",
                              auth_mode="basic",
                              auth_username=self._crowd_username,
                              auth_password=self._crowd_password,
                              headers=self._crowd_headers,
                              body=json_encode(auth_body)
                              )

        # BUGFIX: time.clock() measured CPU time on Unix (near zero while
        # the fetch is being awaited) and was removed in Python 3.8; use
        # wall-clock time to measure request latency.
        fetch_start = time.time()
        try:
            response = yield self._client.fetch(request)
        except HTTPError as e:
            if e.code == 400:
                # Expected status code from the Crowd API
                # for unsuccessful user authentication.
                body = json_decode(e.response.body)
                _LOGGER.warning("Authentication failure for username: %s: %s",
                                username, body["message"])
                return
            # Anything else is unexpected: re-raise the exception.
            raise

        fetch_ms = (time.time() - fetch_start) * 1000
        # Slow authentications are surfaced at WARNING level.
        if fetch_ms > 100:
            _LOGGER.warning("Authentication request success: %sms", fetch_ms)
        else:
            _LOGGER.info("Authentication request success: %sms", fetch_ms)

        # Remember the session cookies Crowd handed back so subsequent
        # requests reuse the session.
        # NOTE(review): self._crowd_headers is shared provider state and is
        # mutated per request; concurrent authentications could interleave
        # cookies -- confirm usage is effectively serialized.
        if "Set-Cookie" in response.headers:
            if "Cookie" in self._crowd_headers:
                del self._crowd_headers["Cookie"]
            for cookie in response.headers.get_list("Set-Cookie"):
                self._crowd_headers.add("Cookie", cookie)

        body = json_decode(response.body)
        if "name" not in body:
            _LOGGER.warning("Missing 'name' attribute in Crowd response")
            return

        user = AuthUser()
        user.username = body["name"]
        raise Return(user)
| 32.561151 | 88 | 0.659744 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
from tornado.gen import coroutine
from tornado.gen import Return
from tornado.httputil import HTTPHeaders
from tornado.httpclient import HTTPError
from tornado.httpclient import HTTPRequest
from tornado.httpclient import AsyncHTTPClient
from tornado.escape import url_escape
from tornado.escape import json_decode
from tornado.escape import json_encode
from . import AuthProvider
from . import AuthUser
_LOGGER = logging.getLogger(__name__)
class CrowdAuthProvider(AuthProvider):
_CROWD_AUTH_URL = "/rest/usermanagement/1/authentication"
_CONFIG_CROWD_URL = "auth_provider_crowd_url"
_CONFIG_CROWD_USERNAME = "auth_provider_crowd_username"
_CONFIG_CROWD_PASSWORD = "auth_provider_crowd_password"
def __init__(self, application):
super(CrowdAuthProvider,self).__init__(application)
settings = self.application.settings
if self._CONFIG_CROWD_URL in settings:
self._crowd_url = settings.get(self._CONFIG_CROWD_URL)
else:
raise RuntimeError("Settings '"+ self._CONFIG_CROWD_URL + "' not found")
if self._CONFIG_CROWD_USERNAME in settings:
self._crowd_username = settings.get(self._CONFIG_CROWD_USERNAME)
else:
raise RuntimeError("Settings '"+ self._CONFIG_CROWD_USERNAME +"' not found")
if self._CONFIG_CROWD_PASSWORD in settings:
self._crowd_password = settings.get(self._CONFIG_CROWD_PASSWORD)
else:
raise RuntimeError("Settings '"+ self._CONFIG_CROWD_PASSWORD +"' not found")
self._crowd_headers = HTTPHeaders({
"Accept":"application/json",
"Content-Type":"application/json"
})
self._client = AsyncHTTPClient()
@coroutine
def authenticate(self, username, password):
auth_url = self._crowd_url
auth_url += self._CROWD_AUTH_URL
auth_url += "?username="
auth_url += url_escape(username)
auth_body = { "value":password }
request = HTTPRequest(auth_url,
method="POST",
auth_mode="basic",
auth_username=self._crowd_username,
auth_password=self._crowd_password,
headers=self._crowd_headers,
body=json_encode(auth_body)
)
fetch_time = time.clock()
try:
response = yield self._client.fetch(request)
except HTTPError as e:
if e.code == 400:
body = json_decode(e.response.body)
_LOGGER.warn("Authentication failure for username: %s: %s",
username, body["message"])
return
raise
fetch_time = (time.clock() - fetch_time) * 1000
if fetch_time > 100:
_LOGGER.warn("Authentication request success: %sms", fetch_time)
else:
_LOGGER.info("Authentication request success: %sms", fetch_time)
if "Set-Cookie" in response.headers:
if "Cookie" in self._crowd_headers:
del self._crowd_headers["Cookie"]
for cookie in response.headers.get_list("Set-Cookie"):
self._crowd_headers.add("Cookie", cookie)
body = json_decode(response.body)
if "name" not in body:
_LOGGER.warn("Missing 'name' attribute in Crowd response")
return
user = AuthUser()
user.username = body["name"]
raise Return(user)
| true | true |
1c32c8e2b728f4959affb9559606f07c687dac76 | 1,656 | py | Python | cyrax/events.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 10 | 2015-12-18T22:39:02.000Z | 2021-03-03T15:11:05.000Z | cyrax/events.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 9 | 2016-01-01T09:28:13.000Z | 2019-12-17T09:39:15.000Z | cyrax/events.py | piranha/cyrax | fd1c0473f0c69631339b2a5476933b00f604c643 | [
"0BSD"
] | 9 | 2015-12-31T23:18:07.000Z | 2020-08-10T10:56:46.000Z | # -*- coding: utf-8 -*-
"""
cyrax.events
~~~~~~~~~~~~~~~
:copyright: 2007-2008 by Armin Ronacher as glashammer.utils.events
:copyright: 2009 by Alexander Solovyov
:license: MIT
"""
import logging
from collections import deque
try:
from sys import intern
except ImportError:
pass
logger = logging.getLogger(__name__)
# `events` global is defined at the end
class EventManager(object):
    """Registry of event listeners with ordered dispatch.

    Listeners for each event name are stored in a ``deque`` so callbacks can
    be cheaply prepended as well as appended.  Event names are interned since
    the same small set of strings is looked up repeatedly.
    """

    def __init__(self):
        self._listeners = {}
        self._last_listener = 0

    def connect(self, event, callback, prepend=False):
        """Connect a callback to an event and return its listener id.

        If `prepend` is True, the callback is put at the front of the queue
        so it runs before previously registered listeners.
        """
        listener_id = self._last_listener
        event = intern(event)
        if event not in self._listeners:
            self._listeners[event] = deque([callback])
        elif prepend:
            self._listeners[event].appendleft(callback)
        else:
            self._listeners[event].append(callback)
        self._last_listener += 1
        return listener_id

    def emit(self, event, *args, **kwargs):
        """Call every listener of *event*; return the list of their results."""
        if event != 'log':
            # Lazy %-style arguments: avoids building the message when the
            # DEBUG level is disabled (the original formatted it eagerly).
            logger.debug('Emit: %s (%s)', event, ', '.join(map(repr, args)))
        return [cb(*args, **kwargs) for cb in self.iter(event)]

    def iter(self, event):
        """Return an iterator over all listeners of *event* (empty if none)."""
        # Single dict lookup instead of a membership test plus an access.
        return iter(self._listeners.get(event, ()))
events = EventManager()
| 25.476923 | 79 | 0.610507 |
import logging
from collections import deque
try:
from sys import intern
except ImportError:
pass
logger = logging.getLogger(__name__)
class EventManager(object):
def __init__(self):
self._listeners = {}
self._last_listener = 0
def connect(self, event, callback, prepend=False):
listener_id = self._last_listener
event = intern(event)
if event not in self._listeners:
self._listeners[event] = deque([callback])
elif prepend:
self._listeners[event].appendleft(callback)
else:
self._listeners[event].append(callback)
self._last_listener += 1
return listener_id
def emit(self, event, *args, **kwargs):
if event != 'log':
logger.debug('Emit: %s (%s)' % (event, ', '.join(map(repr, args))))
return [cb(*args, **kwargs) for cb in self.iter(event)]
def iter(self, event):
if event not in self._listeners:
return iter(())
return iter(self._listeners[event])
events = EventManager()
| true | true |
1c32c949f32cd734de31efaf4aa3e2b1f1b83a1c | 5,754 | py | Python | qlib/data/base.py | minkefusiji/qlib | 5ee2d9496b2ffb261b2ec077b1aac88298938040 | [
"MIT"
] | 2 | 2021-11-09T05:12:36.000Z | 2021-11-09T05:12:40.000Z | qlib/data/base.py | GuBuChang/qlib | 7b15682c637106d4da0801e942d889c5615ebb70 | [
"MIT"
] | null | null | null | qlib/data/base.py | GuBuChang/qlib | 7b15682c637106d4da0801e942d889c5615ebb70 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import division
from __future__ import print_function
import abc
import pandas as pd
from ..log import get_module_logger
class Expression(abc.ABC):
    """Expression base class.

    A node in a lazily-evaluated feature-expression tree.  The overloaded
    operators build the matching operator nodes from ``.ops`` (imported
    locally to avoid a circular import), and :meth:`load` evaluates the tree
    for one instrument over a calendar-index range, memoising results in the
    global cache ``H``.
    """

    def __str__(self):
        return type(self).__name__

    def __repr__(self):
        return str(self)

    def __gt__(self, other):
        from .ops import Gt

        return Gt(self, other)

    def __ge__(self, other):
        from .ops import Ge

        return Ge(self, other)

    def __lt__(self, other):
        from .ops import Lt

        return Lt(self, other)

    def __le__(self, other):
        from .ops import Le

        return Le(self, other)

    def __eq__(self, other):
        from .ops import Eq

        return Eq(self, other)

    def __ne__(self, other):
        from .ops import Ne

        return Ne(self, other)

    def __add__(self, other):
        from .ops import Add

        return Add(self, other)

    def __radd__(self, other):
        from .ops import Add

        return Add(other, self)

    def __sub__(self, other):
        from .ops import Sub

        return Sub(self, other)

    def __rsub__(self, other):
        from .ops import Sub

        return Sub(other, self)

    def __mul__(self, other):
        from .ops import Mul

        return Mul(self, other)

    def __rmul__(self, other):
        from .ops import Mul

        # Consistency fix: the reflected operand belongs on the left, matching
        # __radd__/__rsub__/__rdiv__ (the product itself is commutative, but
        # the node's string form -- used as a cache key -- now reads naturally).
        return Mul(other, self)

    def __div__(self, other):
        from .ops import Div

        return Div(self, other)

    def __rdiv__(self, other):
        from .ops import Div

        return Div(other, self)

    def __truediv__(self, other):
        from .ops import Div

        return Div(self, other)

    def __rtruediv__(self, other):
        from .ops import Div

        return Div(other, self)

    def __pow__(self, other):
        from .ops import Power

        return Power(self, other)

    def __and__(self, other):
        from .ops import And

        return And(self, other)

    def __rand__(self, other):
        from .ops import And

        return And(other, self)

    def __or__(self, other):
        from .ops import Or

        return Or(self, other)

    def __ror__(self, other):
        from .ops import Or

        return Or(other, self)

    def load(self, instrument, start_index, end_index, freq):
        """Load the feature series, with caching.

        Parameters
        ----------
        instrument : str
            instrument code.
        start_index : str
            feature start index [in calendar].
        end_index : str
            feature end index [in calendar].
        freq : str
            feature frequency.

        Returns
        -------
        pd.Series
            feature series: the index of the series is the calendar index.

        Raises
        ------
        ValueError
            if either index is ``None`` or ``start_index > end_index``.
        """
        from .cache import H

        # Memoise on (expression string, instrument, range, freq).
        args = str(self), instrument, start_index, end_index, freq
        if args in H["f"]:
            return H["f"][args]
        if start_index is None or end_index is None or start_index > end_index:
            raise ValueError("Invalid index range: {} {}".format(start_index, end_index))
        try:
            series = self._load_internal(instrument, start_index, end_index, freq)
        except Exception:
            get_module_logger("data").error(
                f"Loading data error: instrument={instrument}, expression={str(self)}, "
                f"start_index={start_index}, end_index={end_index}, freq={freq}"
            )
            raise
        series.name = str(self)
        H["f"][args] = series
        return series

    @abc.abstractmethod
    def _load_internal(self, instrument, start_index, end_index, freq):
        """Load the feature series without caching; implemented by subclasses."""
        raise NotImplementedError("This function must be implemented in your newly defined feature")

    @abc.abstractmethod
    def get_longest_back_rolling(self):
        """Get the longest length of historical data the feature has accessed.

        This is designed for getting the needed range of the data to calculate
        the features in a specific range at first.  However, situations like
        Ref(Ref($close, -1), 1) can not be handled rightly, so this is only
        used for detecting the length of historical data needed.
        """
        # TODO: forward operator like Ref($close, -1) is not supported yet.
        raise NotImplementedError("This function must be implemented in your newly defined feature")

    @abc.abstractmethod
    def get_extended_window_size(self):
        """get_extended_window_size

        To calculate this operator in range[start_index, end_index], we have
        to get the *leaf feature* in
        range[start_index - lft_etd, end_index + rght_etd].

        Returns
        -------
        (int, int)
            lft_etd, rght_etd
        """
        raise NotImplementedError("This function must be implemented in your newly defined feature")
class Feature(Expression):
    """Static Expression: a leaf node that loads raw data from the provider."""

    def __init__(self, name=None):
        # Fall back to the class name when no explicit field name is given.
        self._name = name if name else type(self).__name__

    def __str__(self):
        return "$" + self._name

    def _load_internal(self, instrument, start_index, end_index, freq):
        # Deferred import keeps module load free of the data-provider setup.
        from .data import FeatureD

        return FeatureD.feature(instrument, str(self), start_index, end_index, freq)

    def get_longest_back_rolling(self):
        # A raw feature reads no history beyond the requested range.
        return 0

    def get_extended_window_size(self):
        return 0, 0
class ExpressionOps(Expression):
    """Operator Expression.

    Marker base class for expressions built from operators; such features
    are constructed on the fly rather than loaded directly from a provider.
    """

    pass
| 24.381356 | 100 | 0.607404 |
from __future__ import division
from __future__ import print_function
import abc
import pandas as pd
from ..log import get_module_logger
class Expression(abc.ABC):
def __str__(self):
return type(self).__name__
def __repr__(self):
return str(self)
def __gt__(self, other):
from .ops import Gt
return Gt(self, other)
def __ge__(self, other):
from .ops import Ge
return Ge(self, other)
def __lt__(self, other):
from .ops import Lt
return Lt(self, other)
def __le__(self, other):
from .ops import Le
return Le(self, other)
def __eq__(self, other):
from .ops import Eq
return Eq(self, other)
def __ne__(self, other):
from .ops import Ne
return Ne(self, other)
def __add__(self, other):
from .ops import Add
return Add(self, other)
def __radd__(self, other):
from .ops import Add
return Add(other, self)
def __sub__(self, other):
from .ops import Sub
return Sub(self, other)
def __rsub__(self, other):
from .ops import Sub
return Sub(other, self)
def __mul__(self, other):
from .ops import Mul
return Mul(self, other)
def __rmul__(self, other):
from .ops import Mul
return Mul(self, other)
def __div__(self, other):
from .ops import Div
return Div(self, other)
def __rdiv__(self, other):
from .ops import Div
return Div(other, self)
def __truediv__(self, other):
from .ops import Div
return Div(self, other)
def __rtruediv__(self, other):
from .ops import Div
return Div(other, self)
def __pow__(self, other):
from .ops import Power
return Power(self, other)
def __and__(self, other):
from .ops import And
return And(self, other)
def __rand__(self, other):
from .ops import And
return And(other, self)
def __or__(self, other):
from .ops import Or
return Or(self, other)
def __ror__(self, other):
from .ops import Or
return Or(other, self)
def load(self, instrument, start_index, end_index, freq):
from .cache import H
args = str(self), instrument, start_index, end_index, freq
if args in H["f"]:
return H["f"][args]
if start_index is None or end_index is None or start_index > end_index:
raise ValueError("Invalid index range: {} {}".format(start_index, end_index))
try:
series = self._load_internal(instrument, start_index, end_index, freq)
except Exception:
get_module_logger("data").error(
f"Loading data error: instrument={instrument}, expression={str(self)}, "
f"start_index={start_index}, end_index={end_index}, freq={freq}"
)
raise
series.name = str(self)
H["f"][args] = series
return series
@abc.abstractmethod
def _load_internal(self, instrument, start_index, end_index, freq):
raise NotImplementedError("This function must be implemented in your newly defined feature")
@abc.abstractmethod
def get_longest_back_rolling(self):
raise NotImplementedError("This function must be implemented in your newly defined feature")
@abc.abstractmethod
def get_extended_window_size(self):
raise NotImplementedError("This function must be implemented in your newly defined feature")
class Feature(Expression):
def __init__(self, name=None):
if name:
self._name = name
else:
self._name = type(self).__name__
def __str__(self):
return "$" + self._name
def _load_internal(self, instrument, start_index, end_index, freq):
from .data import FeatureD
return FeatureD.feature(instrument, str(self), start_index, end_index, freq)
def get_longest_back_rolling(self):
return 0
def get_extended_window_size(self):
return 0, 0
class ExpressionOps(Expression):
pass
| true | true |
1c32c97738a15cff439f8342b36199b82ae5409a | 3,902 | py | Python | kmip/tests/unit/core/test_policy.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | 12 | 2016-09-14T21:59:10.000Z | 2020-03-11T07:37:25.000Z | kmip/tests/unit/core/test_policy.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | null | null | null | kmip/tests/unit/core/test_policy.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import tempfile
import testtools
from kmip.core import enums
from kmip.core import policy
class TestPolicy(testtools.TestCase):
    """Tests for ``kmip.core.policy.read_policy_from_file``."""

    def setUp(self):
        super(TestPolicy, self).setUp()
        # Scratch directory for generated policy files; removed automatically.
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)

    def tearDown(self):
        super(TestPolicy, self).tearDown()

    def _write_policy_file(self, content):
        """Write *content* to a fresh file in the temp dir; return its path."""
        policy_file = tempfile.NamedTemporaryFile(
            dir=self.temp_dir,
            delete=False
        )
        with open(policy_file.name, 'w') as f:
            f.write(content)
        return policy_file.name

    def _assert_parse_error(self, content, regex):
        """Assert parsing *content* raises a ValueError matching *regex*."""
        path = self._write_policy_file(content)
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # use the canonical assertRaisesRegex.
        self.assertRaisesRegex(
            ValueError,
            regex,
            policy.read_policy_from_file,
            path
        )

    def test_read_policy_from_file(self):
        """A well-formed policy file parses into enum-keyed dictionaries."""
        path = self._write_policy_file(
            '{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
        )

        policies = policy.read_policy_from_file(path)

        self.assertEqual(1, len(policies))
        self.assertIn('test', policies.keys())

        test_policy = {
            enums.ObjectType.CERTIFICATE: {
                enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
            }
        }
        self.assertEqual(test_policy, policies.get('test'))

    def test_read_policy_from_file_empty(self):
        """An empty file is rejected as invalid JSON."""
        self._assert_parse_error(
            '',
            "An error occurred while attempting to parse the JSON file."
        )

    def test_read_policy_from_file_bad_object_type(self):
        """An unknown object type name is rejected."""
        self._assert_parse_error(
            '{"test": {"INVALID": {"LOCATE": "ALLOW_ALL"}}}',
            "'INVALID' is not a valid ObjectType value."
        )

    def test_read_policy_from_file_bad_operation(self):
        """An unknown operation name is rejected."""
        self._assert_parse_error(
            '{"test": {"CERTIFICATE": {"INVALID": "ALLOW_ALL"}}}',
            "'INVALID' is not a valid Operation value."
        )

    def test_read_policy_from_file_bad_permission(self):
        """An unknown permission name is rejected."""
        self._assert_parse_error(
            '{"test": {"CERTIFICATE": {"LOCATE": "INVALID"}}}',
            "'INVALID' is not a valid Policy value."
        )
| 29.78626 | 76 | 0.59021 |
import shutil
import tempfile
import testtools
from kmip.core import enums
from kmip.core import policy
class TestPolicy(testtools.TestCase):
def setUp(self):
super(TestPolicy, self).setUp()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.temp_dir)
def tearDown(self):
super(TestPolicy, self).tearDown()
def test_read_policy_from_file(self):
policy_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
delete=False
)
with open(policy_file.name, 'w') as f:
f.write(
'{"test": {"CERTIFICATE": {"LOCATE": "ALLOW_ALL"}}}'
)
policies = policy.read_policy_from_file(policy_file.name)
self.assertEqual(1, len(policies))
self.assertIn('test', policies.keys())
test_policy = {
enums.ObjectType.CERTIFICATE: {
enums.Operation.LOCATE: enums.Policy.ALLOW_ALL
}
}
self.assertEqual(test_policy, policies.get('test'))
def test_read_policy_from_file_empty(self):
policy_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
delete=False
)
with open(policy_file.name, 'w') as f:
f.write('')
args = (policy_file.name, )
regex = "An error occurred while attempting to parse the JSON file."
self.assertRaisesRegexp(
ValueError,
regex,
policy.read_policy_from_file,
*args
)
def test_read_policy_from_file_bad_object_type(self):
policy_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
delete=False
)
with open(policy_file.name, 'w') as f:
f.write(
'{"test": {"INVALID": {"LOCATE": "ALLOW_ALL"}}}'
)
args = (policy_file.name, )
regex = "'INVALID' is not a valid ObjectType value."
self.assertRaisesRegexp(
ValueError,
regex,
policy.read_policy_from_file,
*args
)
def test_read_policy_from_file_bad_operation(self):
policy_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
delete=False
)
with open(policy_file.name, 'w') as f:
f.write(
'{"test": {"CERTIFICATE": {"INVALID": "ALLOW_ALL"}}}'
)
args = (policy_file.name, )
regex = "'INVALID' is not a valid Operation value."
self.assertRaisesRegexp(
ValueError,
regex,
policy.read_policy_from_file,
*args
)
def test_read_policy_from_file_bad_permission(self):
policy_file = tempfile.NamedTemporaryFile(
dir=self.temp_dir,
delete=False
)
with open(policy_file.name, 'w') as f:
f.write(
'{"test": {"CERTIFICATE": {"LOCATE": "INVALID"}}}'
)
args = (policy_file.name, )
regex = "'INVALID' is not a valid Policy value."
self.assertRaisesRegexp(
ValueError,
regex,
policy.read_policy_from_file,
*args
)
| true | true |
1c32ca2f342192aa11548e66287cc2d2ff6b0c83 | 10,104 | py | Python | pyHeatTransfer/conduction.py | OSUmageed/pyHeatTransfer | b1db8ca7594a657826a1ccfb38a4e4eb102cce55 | [
"MIT"
] | 3 | 2017-07-30T19:01:21.000Z | 2021-06-03T23:28:16.000Z | pyHeatTransfer/conduction.py | OSUmageed/pyHeatTransfer | b1db8ca7594a657826a1ccfb38a4e4eb102cce55 | [
"MIT"
] | null | null | null | pyHeatTransfer/conduction.py | OSUmageed/pyHeatTransfer | b1db8ca7594a657826a1ccfb38a4e4eb102cce55 | [
"MIT"
] | 1 | 2020-06-28T08:28:59.000Z | 2020-06-28T08:28:59.000Z | #I suppose the idea is that it gets these values from somewhere and takes off.
#Regardless of where those values come from.
import os
import os.path as op
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import CoolProp.CoolProp as cp
import collections
import time
import random
from deco import concurrent, synchronized
sourcepath = op.abspath(op.dirname(__file__))
gitpath = op.dirname(sourcepath) #Top level of git repo
os.chdir(sourcepath)
sys.path.append(gitpath)
import geometry as geo
import SolidProp.PropertySI as sp
import convection as cv
shape_func = {
'Brick': lambda z, ht: True,
'Ziggurat' : lambda z, ht: (z%ht)
}
thispath = op.abspath(op.dirname(__file__))
toK = 273.15
tag = geo.tags
def contourmaker(Tg, XX, yspot):
    """Project the temperature dict onto the x-z plane at y == yspot.

    Tg maps (x, y, z) -> temperature; XX supplies the output array shape.
    Points off the requested plane stay zero.
    """
    sheet = np.zeros_like(XX)
    for (x, y, z), temp in Tg.items():
        if y == yspot:
            sheet[z, x] = temp
    return sheet
def randT(T):
    """Return T perturbed by a uniform random jitter in [0, 0.1)."""
    jitter = random.random() * 0.1
    return T + jitter
#Doesn't do the ziggurat!
def make_grid(xi, xf, yi, yf, z, Ti, zFlag=""):
    """Build one z-layer of grid points for x in [xi, xf], y in [yi, yf].

    Returns ``(Tmake, typr)``: a dict of randomised initial temperatures and
    a dict of boundary-tag strings, both keyed by ``(x, y, z)``.  Tags follow
    the original convention: 'E' at x == xi, 'W' at x == xf, 'S' at y == yi,
    'N' at y == yf, with *zFlag* always appended (corners get both letters).
    Assumes xi < xf and yi < yf (a degenerate layer would collapse the
    east/west and south/north rows).

    The original hand-unrolled first/middle/last rows; a single nested loop
    in the same (x outer, y inner) order reproduces the identical insertion
    order and randT() call sequence.
    """
    def boundary_flag(x, y):
        # Compose x-tag then y-tag, matching the original xFlag+yFlag order.
        flag = ""
        if x == xi:
            flag += "E"
        elif x == xf:
            flag += "W"
        if y == yi:
            flag += "S"
        elif y == yf:
            flag += "N"
        return flag + zFlag

    typr = dict()
    Tmake = dict()
    for x in range(xi, xf + 1):
        for y in range(yi, yf + 1):
            gr = (x, y, z)
            typr[gr] = boundary_flag(x, y)
            Tmake[gr] = randT(Ti)
    return Tmake, typr
def step_forwardf(Tg_in, ky, Pg, typD, V, A, dt, ds, Ta, h, ep, qVol):
    """Explicit-Euler temperature update for one grid node.

    Parameters
    ----------
    Tg_in : dict
        (x, y, z) -> temperature [K] at the current step.
    ky : tuple
        Key of the node being updated.
    Pg : dict
        Material properties at this node ('A' diffusivity, 'D' density,
        'CP' heat capacity).
    typD : str
        Boundary tag for this node (key into the geometry ``tag`` table).
    V, A : float
        Cell volume and face area.
    dt, ds : float
        Time step and grid spacing.
    Ta, h, ep : float
        Ambient temperature, convection coefficient, emissivity.
    qVol : float
        Volumetric heat generation.

    Returns the node temperature after one step of length dt.
    """
    ty = tag[typD]
    cond_coefficient = Pg['A']/(V*ds*ty['Vc'])
    cs = np.array(ky) + np.array(ty['Stencil'])
    # Neighbour temperatures in stencil order.
    neighbors = [Tg_in[tuple(c)] for c in cs]
    conduction = cond_coefficient * (sum(ci*Ai*A for ci, Ai in zip(ty['Acond'], neighbors)) -
                    Tg_in[ky]*A*sum(ty['Acond']))
    cv_radiant = cv.ambientQ(Tg_in[ky], Ta, ty['Aconv'][0]*A, h, ep)/(V*ty['Vc']*Pg['D']*Pg['CP'])
    # Bug fix: the source term must be qVol / (rho * cp); the original wrote
    # qVol/Pg['D']*Pg['CP'] which evaluates as (qVol/rho)*cp, inconsistent
    # with HeatSimulation.__take_step which parenthesises the denominator.
    return Tg_in[ky] + dt*(conduction + cv_radiant + qVol/(Pg['D']*Pg['CP']))
def forward_call(Tg_in, Pg, typD, dt, ds, Ta, h, ep, qVol=0.0):
    """Advance every node one explicit time step; return the new temperature dict."""
    face_area = ds ** 2
    cell_volume = ds ** 3
    return {
        node: step_forwardf(Tg_in, node, Pg[node], typD[node], cell_volume,
                            face_area, dt, ds, Ta, h, ep, qVol)
        for node in Tg_in
    }
#could use scipy interpolate to do a spline. This is limited to just linear.
class SolidProperties(object):
    """Temperature-dependent material properties interpolated onto grid points.

    ``self.props`` maps property name -> 2xN array (row 0: temperatures,
    row 1: property values, from ``sp.get_props``); ``self.pGrid`` maps each
    grid point to a dict of properties interpolated at that point's current
    temperature.
    """

    def __init__(self, mat, Tgrid=None):
        # Bug fix: the original used a mutable default argument (Tgrid={});
        # None is the conventional sentinel for "no grid yet".
        self.props = sp.get_props(mat)
        self.pGrid = collections.defaultdict(dict)
        self.update_props(Tgrid if Tgrid is not None else {})

    def update_props(self, Tgrid):
        """Re-interpolate every property at each point's temperature in *Tgrid*."""
        for pt in Tgrid.keys():
            for prop in self.props.keys():
                # Linear interpolation along the property's temperature axis.
                self.pGrid[pt][prop] = np.interp(Tgrid[pt], self.props[prop][0, :], self.props[prop][1, :])

    def query_props(self, Temp):
        """Return ``{T: {prop: value}}`` for one temperature or a list of them."""
        Tget = Temp if isinstance(Temp, list) else [Temp]
        out = collections.defaultdict(dict)
        for T in Tget:
            for prop in self.props.keys():
                out[T][prop] = np.interp(T, self.props[prop][0, :], self.props[prop][1, :])
        return out
#Using Dict
#Using Dict
class HeatSimulation(object):
    """Object-oriented explicit finite-difference heat-conduction simulation.

    Wraps the same grid construction and update rule as the function-based
    ``initialize``/``forward_call`` path, keeping temperatures, boundary tags
    and material properties as instance state.

    NOTE(review): unlike ``initialize``, ``step_forward`` never calls
    ``self.mProps.update_props``, so material properties stay interpolated at
    the initial temperatures -- confirm whether that is intentional.
    """

    def __init__(self, specificDict):
        # Unpack the configuration dict; temperatures arrive in Celsius and
        # are converted to Kelvin via the module constant toK.
        self.parameter_dict = specificDict
        self.mat = specificDict['mat']
        self.ds = specificDict['ds']
        self.dt = specificDict['dt']
        self.Ti = specificDict['Ti'] + toK
        self.Ta = specificDict['Ta'] + toK
        self.h = specificDict['h']
        self.ep = specificDict['ep']
        self.Lx = specificDict['Lx']
        self.Ly = specificDict['Ly']
        self.Lz = specificDict['Lz']
        self.tF = specificDict['tFinal']
        self.qVol = specificDict['qVol']
        self.tNow = 0.0
        # Node counts per axis, and per-cell face area / volume.
        self.Nx = int(self.Lx/self.ds)+1
        self.Ny = int(self.Ly/self.ds)+1
        self.Nz = int(self.Lz/self.ds)+1
        self.A = self.ds**2
        self.V = self.ds**3
        # Plotting mesh over the x-z plane (y is the sampled cross-section).
        self.xrng = np.arange(0, self.Lx + 2.0*self.ds, self.ds)
        self.yrng = np.arange(0, self.Lz + 2.0*self.ds, self.ds)
        self.Gx, self.Gz = np.meshgrid(self.xrng, self.yrng)
        self.pPlot = np.zeros_like(self.Gx)
        self.Tgrid, self.fGrid = self.__instantiate_grid()
        self.mProps = SolidProperties(self.mat, self.Tgrid)

    def __instantiate_grid(self):
        """Build the temperature and boundary-tag dicts layer by layer in z."""
        xf, yf = self.Nx, self.Ny
        xi, yi = 0, 0
        # Bottom layer is tagged "B"; top layer "U"; interior layers untagged.
        Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, self.Ti, zFlag="B")
        cD = self.parameter_dict['stepD']
        stepFunction = shape_func[self.parameter_dict['shape']]
        for z in range(1,self.Nz):
            # For the ziggurat, every stepH-th layer shrinks inward by stepD.
            if not stepFunction(z, self.parameter_dict['stepH']):
                xi += cD
                xf -= cD
                yi += cD
                yf -= cD
            Tt, ft = make_grid(xi, xf, yi, yf, z, self.Ti)
            Tuno.update(Tt)
            fGrid.update(ft)
        Tt, ft = make_grid(xi, xf, yi, yf, self.Nz, self.Ti, zFlag="U")
        Tuno.update(Tt)
        fGrid.update(ft)
        return Tuno, fGrid

    def step_forward(self):
        """Advance every node by one explicit time step (Jacobi-style update)."""
        Tg_out = dict()
        for key in self.Tgrid.keys():
            Tg_out[key] = self.__take_step(key)
        self.tNow += self.dt
        self.Tgrid = Tg_out

    def __take_step(self, key):
        """Return the updated temperature of one node (conduction + surface loss + source)."""
        ty = tag[self.fGrid[key]]
        pG = self.mProps.pGrid[key]
        #Alpha/(V*ds*Vcoeff)
        cond_coefficient = pG['A']/(self.V * self.ds * ty['Vc'])
        # Neighbour keys from this node's boundary-specific stencil.
        cs = np.array(key) + np.array(ty['Stencil'])
        ck = []
        for c in cs:
            ck.append(self.Tgrid[tuple(c)])
        conduction = cond_coefficient * (sum([ci*Ai*self.A for ci, Ai in list(zip(ty['Acond'],ck))]) -
                        self.Tgrid[key]*self.A*sum(ty['Acond']))
        # Convective + radiant exchange with ambient, per unit thermal mass.
        cv_radiant = cv.ambientQ(self.Tgrid[key], self.Ta, ty['Aconv'][0]*self.A, self.h, self.ep)/(self.V * ty['Vc'] * pG['D'] * pG['CP'])
        return self.Tgrid[key] + self.dt*(conduction + cv_radiant + self.qVol/(pG['D']*pG['CP']))

    def plot_step(self, ySpot):
        """Fill self.pPlot with the x-z temperature slice at y == ySpot."""
        for key in self.Tgrid.keys():
            x,y,z = key
            if y != ySpot:
                continue
            self.pPlot[z,x] = self.Tgrid[key]
#Make a run function for the class-based version.
def initialize_class(specificDict):
    """Run a HeatSimulation to completion and plot the mid-plane contour.

    Bug fix: the original referenced undefined names ``yval`` and ``tnow``
    in the plot title (guaranteed NameError at runtime, copied from
    ``initialize``); the values now come from the simulation object itself.
    """
    hsim = HeatSimulation(specificDict)
    Gsize = hsim.Gx.shape
    t = [time.time()]
    while hsim.tNow < hsim.tF:
        hsim.step_forward()
        t.append(time.time())
        # Simulated time reached and wall-clock seconds spent on this step.
        print(hsim.tNow, t[-1]-t[-2])
    # Sample the temperature field on the mid-plane in y.
    hsim.plot_step(Gsize[1]//2)
    CS = plt.contour(hsim.Gx, hsim.Gz, hsim.pPlot-toK, 5)
    plt.title("yaxis = {:.3f}, t = {:.3f} s".format(hsim.Ly/2, hsim.tNow))
    plt.ylabel('Z axis')
    plt.xlabel('X axis')
    plt.clabel(CS, inline=1, fontsize=10)
    plt.grid(True)
    plt.show()
#Called by calling conduction without interface.
def initialize(specificDict):
    """Function-based driver: build the grid, time-march, and plot a contour.

    Mirrors ``initialize_class`` but keeps all state in locals and refreshes
    the temperature-dependent material properties after every half step
    (two ``forward_call`` evaluations per loop iteration, hence tnow += 2*dt).
    Returns the string 'Yay' on completion.
    """
    ds = specificDict['ds']
    Lx, Ly, Lz = specificDict['Lx'], specificDict['Ly'], specificDict['Lz']
    Nx, Ny, Nz = int(Lx/ds)+1, int(Ly/ds)+1, int(Lz/ds)+1
    # Plotting mesh over the x-z plane.
    Gx, Gz = np.meshgrid(np.arange(0,Lx+2.0*ds,ds), np.arange(0,Lz+2.0*ds,ds))
    dt = specificDict['dt']
    Ti = specificDict['Ti'] + toK
    Ta, h, ep = specificDict['Ta'] + toK, specificDict['h'], specificDict['ep']
    xf, yf = Nx, Ny
    xi, yi = 0, 0
    # Bottom layer tagged "B", top layer "U"; see make_grid for tag meanings.
    Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, Ti, zFlag="B")
    cD = specificDict['stepD']
    stepFunction = shape_func[specificDict['shape']]
    for z in range(1,Nz):
        # For the ziggurat, every stepH-th layer shrinks inward by stepD.
        if not stepFunction(z, specificDict['stepH']):
            xi += cD
            xf -= cD
            yi += cD
            yf -= cD
        Tt, ft = make_grid(xi, xf, yi, yf, z, Ti)
        Tuno.update(Tt)
        fGrid.update(ft)
    Tt, ft = make_grid(xi, xf, yi, yf, Nz, Ti, zFlag="U")
    Tuno.update(Tt)
    fGrid.update(ft)
    tnow = 0.0
    yval = Ly/2
    Gsize = Gx.shape
    yplace = Gsize[1]//2
    matProps = SolidProperties(specificDict['mat'], Tuno)
    t = [time.time()]
    print(Gsize, len(Tuno.keys()))
    while tnow < specificDict['tFinal']:
        # Two explicit steps per iteration, re-interpolating material
        # properties at the new temperatures after each one.
        Tdos = forward_call(Tuno, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)
        matProps.update_props(Tdos)
        Tuno = forward_call(Tdos, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)
        matProps.update_props(Tuno)
        tnow += dt*2.0
        t.append(time.time())
        print(tnow, t[-1]-t[-2])
    # Mid-plane slice in Celsius for the contour plot.
    Zv = contourmaker(Tuno, Gx, yplace)
    CS = plt.contour(Gx, Gz, Zv-toK, 5)
    plt.title("yaxis = {:.3f}, t = {:.3f} s".format(yval, tnow))
    plt.ylabel('Z axis')
    plt.xlabel('X axis')
    plt.clabel(CS, inline=1, fontsize=10)
    plt.grid(True)
    plt.show()
    return 'Yay'
if __name__ == "__main__":
import examples as ex
print("You have chosen to run a predefined example: ")
choice = bool(int(input("Enter 1 for ziggurat, 0 for brick: ")))
param = ex.zigg if choice else ex.bricky
param['tFinal'] = 10.0
initialize_class(param)
#initialize(param) | 29.630499 | 140 | 0.538104 |
import os
import os.path as op
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import CoolProp.CoolProp as cp
import collections
import time
import random
from deco import concurrent, synchronized
sourcepath = op.abspath(op.dirname(__file__))
gitpath = op.dirname(sourcepath)
os.chdir(sourcepath)
sys.path.append(gitpath)
import geometry as geo
import SolidProp.PropertySI as sp
import convection as cv
shape_func = {
'Brick': lambda z, ht: True,
'Ziggurat' : lambda z, ht: (z%ht)
}
thispath = op.abspath(op.dirname(__file__))
toK = 273.15
tag = geo.tags
def contourmaker(Tg, XX, yspot):
npE = np.zeros_like(XX)
for key in Tg.keys():
x,y,z = key
if y != yspot:
continue
npE[z,x] = Tg[key]
return npE
def randT(T):
return 0.1*random.random() + T
def make_grid(xi, xf, yi, yf, z, Ti, zFlag=""):
xFlag = ["E", "W"]
yFlag = ["S", "N"]
typr = dict()
Tmake = dict()
#First x row
gr = (xi, yi, z)
typr[gr] = xFlag[0]+yFlag[0]+zFlag
Tmake[gr] = randT(Ti)
for y in range(yi+1,yf):
gr = (xi, y, z)
typr[gr] = xFlag[0]+zFlag
Tmake[gr] = randT(Ti)
gr = (xi, yf, z)
typr[gr] = xFlag[0]+yFlag[1]+zFlag
Tmake[gr] = randT(Ti)
# All central x rows
for x in range(xi+1,xf):
gr = (x, yi, z)
typr[gr] = yFlag[0]+zFlag
Tmake[gr] = randT(Ti)
for y in range(yi+1,yf):
gr = (x, y, z)
typr[gr] = zFlag
Tmake[gr] = randT(Ti)
gr = (x,yf,z)
typr[gr] = yFlag[1]+zFlag
Tmake[gr] = randT(Ti)
#Last x row
gr = (xf, yi, z)
typr[gr] = xFlag[1]+yFlag[0]+zFlag
Tmake[gr] = randT(Ti)
for y in range(yi+1,yf):
gr = (xf, y, z)
typr[gr] = xFlag[1]+zFlag
Tmake[gr] = randT(Ti)
gr = (xf,yf,z)
typr[gr] = xFlag[1]+yFlag[1]+zFlag
Tmake[gr] = randT(Ti)
return Tmake, typr
def step_forwardf(Tg_in, ky, Pg, typD, V, A, dt, ds, Ta, h, ep, qVol):
ty = tag[typD]
cond_coefficient = Pg['A']/(V*ds*ty['Vc'])
cs = np.array(ky) + np.array(ty['Stencil'])
ck = []
for c in cs:
ck.append(Tg_in[tuple(c)])
conduction = cond_coefficient * (sum([ci*Ai*A for ci, Ai in list(zip(ty['Acond'],ck))]) -
Tg_in[ky]*A*sum(ty['Acond']))
cv_radiant = cv.ambientQ(Tg_in[ky], Ta, ty['Aconv'][0]*A, h, ep)/(V*ty['Vc']*Pg['D']*Pg['CP'])
return Tg_in[ky] + dt*(conduction + cv_radiant + qVol/Pg['D']*Pg['CP'])
def forward_call(Tg_in, Pg, typD, dt, ds, Ta, h, ep, qVol=0.0):
A = ds**2
V = ds**3
Tg_out = dict()
for key in Tg_in.keys():
Tg_out[key] = step_forwardf(Tg_in, key, Pg[key], typD[key], V,
A, dt, ds, Ta, h, ep, qVol)
return Tg_out
#could use scipy interpolate to do a spline. This is limited to just linear.
class SolidProperties(object):
def __init__(self, mat, Tgrid={}):
self.props = sp.get_props(mat)
self.pGrid = collections.defaultdict(dict)
self.update_props(Tgrid)
#Should accommodate lists in order to
def update_props(self, Tgrid):
for pt in Tgrid.keys():
for prop in self.props.keys():
self.pGrid[pt][prop] = np.interp(Tgrid[pt], self.props[prop][0, :], self.props[prop][1, :])
def query_props(self, Temp):
Tget = Temp if isinstance(Temp, list) else [Temp]
out = collections.defaultdict(dict)
for T in Tget:
for prop in self.props.keys():
out[T][prop] = np.interp(T, self.props[prop][0, :], self.props[prop][1, :])
return out
#Using Dict
class HeatSimulation(object):
def __init__(self, specificDict):
self.parameter_dict = specificDict
self.mat = specificDict['mat']
self.ds = specificDict['ds']
self.dt = specificDict['dt']
self.Ti = specificDict['Ti'] + toK
self.Ta = specificDict['Ta'] + toK
self.h = specificDict['h']
self.ep = specificDict['ep']
self.Lx = specificDict['Lx']
self.Ly = specificDict['Ly']
self.Lz = specificDict['Lz']
self.tF = specificDict['tFinal']
self.qVol = specificDict['qVol']
self.tNow = 0.0
self.Nx = int(self.Lx/self.ds)+1
self.Ny = int(self.Ly/self.ds)+1
self.Nz = int(self.Lz/self.ds)+1
self.A = self.ds**2
self.V = self.ds**3
self.xrng = np.arange(0, self.Lx + 2.0*self.ds, self.ds)
self.yrng = np.arange(0, self.Lz + 2.0*self.ds, self.ds)
self.Gx, self.Gz = np.meshgrid(self.xrng, self.yrng)
self.pPlot = np.zeros_like(self.Gx)
self.Tgrid, self.fGrid = self.__instantiate_grid()
self.mProps = SolidProperties(self.mat, self.Tgrid)
def __instantiate_grid(self):
xf, yf = self.Nx, self.Ny
xi, yi = 0, 0
Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, self.Ti, zFlag="B")
cD = self.parameter_dict['stepD']
stepFunction = shape_func[self.parameter_dict['shape']]
for z in range(1,self.Nz):
if not stepFunction(z, self.parameter_dict['stepH']):
xi += cD
xf -= cD
yi += cD
yf -= cD
Tt, ft = make_grid(xi, xf, yi, yf, z, self.Ti)
Tuno.update(Tt)
fGrid.update(ft)
Tt, ft = make_grid(xi, xf, yi, yf, self.Nz, self.Ti, zFlag="U")
Tuno.update(Tt)
fGrid.update(ft)
return Tuno, fGrid
def step_forward(self):
Tg_out = dict()
for key in self.Tgrid.keys():
Tg_out[key] = self.__take_step(key)
self.tNow += self.dt
self.Tgrid = Tg_out
def __take_step(self, key):
ty = tag[self.fGrid[key]]
pG = self.mProps.pGrid[key]
#Alpha/(V*ds*Vcoeff)
cond_coefficient = pG['A']/(self.V * self.ds * ty['Vc'])
cs = np.array(key) + np.array(ty['Stencil'])
ck = []
for c in cs:
ck.append(self.Tgrid[tuple(c)])
conduction = cond_coefficient * (sum([ci*Ai*self.A for ci, Ai in list(zip(ty['Acond'],ck))]) -
self.Tgrid[key]*self.A*sum(ty['Acond']))
cv_radiant = cv.ambientQ(self.Tgrid[key], self.Ta, ty['Aconv'][0]*self.A, self.h, self.ep)/(self.V * ty['Vc'] * pG['D'] * pG['CP'])
return self.Tgrid[key] + self.dt*(conduction + cv_radiant + self.qVol/(pG['D']*pG['CP']))
def plot_step(self, ySpot):
for key in self.Tgrid.keys():
x,y,z = key
if y != ySpot:
continue
self.pPlot[z,x] = self.Tgrid[key]
#Make a run function for the class-based version.
def initialize_class(specificDict):
hsim = HeatSimulation(specificDict)
Gsize = hsim.Gx.shape
t = [time.time()]
while hsim.tNow < hsim.tF:
hsim.step_forward()
t.append(time.time())
print(hsim.tNow, t[-1]-t[-2])
hsim.plot_step(Gsize[1]//2)
CS = plt.contour(hsim.Gx, hsim.Gz, hsim.pPlot-toK, 5)
plt.title("yaxis = {:.3f}, t = {:.3f} s".format(yval, tnow))
plt.ylabel('Z axis')
plt.xlabel('X axis')
plt.clabel(CS, inline=1, fontsize=10)
plt.grid(True)
plt.show()
#Called by calling conduction without interface.
def initialize(specificDict):
ds = specificDict['ds']
Lx, Ly, Lz = specificDict['Lx'], specificDict['Ly'], specificDict['Lz']
Nx, Ny, Nz = int(Lx/ds)+1, int(Ly/ds)+1, int(Lz/ds)+1
Gx, Gz = np.meshgrid(np.arange(0,Lx+2.0*ds,ds), np.arange(0,Lz+2.0*ds,ds))
dt = specificDict['dt']
Ti = specificDict['Ti'] + toK
Ta, h, ep = specificDict['Ta'] + toK, specificDict['h'], specificDict['ep']
xf, yf = Nx, Ny
xi, yi = 0, 0
Tuno, fGrid = make_grid(xi, xf, yi, yf, 0, Ti, zFlag="B")
cD = specificDict['stepD']
stepFunction = shape_func[specificDict['shape']]
for z in range(1,Nz):
if not stepFunction(z, specificDict['stepH']):
xi += cD
xf -= cD
yi += cD
yf -= cD
Tt, ft = make_grid(xi, xf, yi, yf, z, Ti)
Tuno.update(Tt)
fGrid.update(ft)
Tt, ft = make_grid(xi, xf, yi, yf, Nz, Ti, zFlag="U")
Tuno.update(Tt)
fGrid.update(ft)
tnow = 0.0
yval = Ly/2
Gsize = Gx.shape
yplace = Gsize[1]//2
matProps = SolidProperties(specificDict['mat'], Tuno)
t = [time.time()]
print(Gsize, len(Tuno.keys()))
while tnow < specificDict['tFinal']:
Tdos = forward_call(Tuno, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)
matProps.update_props(Tdos)
Tuno = forward_call(Tdos, matProps.pGrid, fGrid, dt, ds, Ta, h, ep)
matProps.update_props(Tuno)
tnow += dt*2.0
t.append(time.time())
print(tnow, t[-1]-t[-2])
Zv = contourmaker(Tuno, Gx, yplace)
CS = plt.contour(Gx, Gz, Zv-toK, 5)
plt.title("yaxis = {:.3f}, t = {:.3f} s".format(yval, tnow))
plt.ylabel('Z axis')
plt.xlabel('X axis')
plt.clabel(CS, inline=1, fontsize=10)
plt.grid(True)
plt.show()
return 'Yay'
if __name__ == "__main__":
    import examples as ex
    # Interactive driver: pick one of the predefined example geometries.
    print("You have chosen to run a predefined example: ")
    wants_ziggurat = bool(int(input("Enter 1 for ziggurat, 0 for brick: ")))
    if wants_ziggurat:
        param = ex.zigg
    else:
        param = ex.bricky
    param['tFinal'] = 10.0
    initialize_class(param)
#initialize(param) | true | true |
1c32ca3aa49e20ae7571a8f8bceee4c37493a328 | 6,188 | py | Python | model/DynamicPricing.py | pranavsb/RL_smart_grid | b23b407d3c873171d9a2af6d5a0104a7bcadc6cd | [
"MIT"
] | 8 | 2019-09-06T08:05:23.000Z | 2022-03-24T23:40:55.000Z | model/DynamicPricing.py | pranavsb/RL_smart_grid | b23b407d3c873171d9a2af6d5a0104a7bcadc6cd | [
"MIT"
] | 1 | 2020-10-30T13:00:49.000Z | 2021-01-28T04:18:34.000Z | model/DynamicPricing.py | pranavsb/RL_smart_grid | b23b407d3c873171d9a2af6d5a0104a7bcadc6cd | [
"MIT"
] | 3 | 2020-09-12T16:21:44.000Z | 2020-11-05T20:33:34.000Z | from Environment import Environment
from QTableAgent import QTableAgent
import time, os
import numpy as np
import pickle
from Utils import get_battery_reward_factor
# --- Source (pricing) agent configuration ----------------------------------
SOURCE_DEMAND_STATE = 'demand'  # state key observed by the source agent
SOURCE_SMART_LOADS = True  # if True, loads are driven by a pre-trained agent
SOURCE_LEARNING_RATE = 0.03
SOURCE_DISCOUNT_FACTOR = 0.95
SOURCE_NUM_LOADS = 10
SOURCE_MODE = 'vanilla'  # Q-learning flavour: 'vanilla' or 'sarsa'
# --- Keys into load_agent_params (string constants, not values) ------------
LOAD_RANDOMIZE_BATTERY = 'rb'
LOAD_MODE = 'mode'
LOAD_DAY = 'day'  # checkpoint day of the pre-trained load agent
LOAD_NUM_LOADS = 'ndl'
LOAD_LEARNING_RATE = 'lr'
LOAD_DISCOUNT_FACTOR = 'df'
# --- State keys used when querying the load agent --------------------------
LOAD_BATTERY_STATE = 'battery'
LOAD_PRICE_STATE = 'price'
# --- Output directory for source-agent checkpoints.
# NOTE: built (and created on disk) at import time; the path encodes whether
# the loads were smart during training plus the discount/learning rates.
MODEL_PATH = os.getcwd()
MODEL_PATH+='/dynamic_pricing_models'
if not os.path.isdir(MODEL_PATH):
    os.makedirs(MODEL_PATH)
if SOURCE_SMART_LOADS:
    MODEL_PATH += '/smart'
else:
    MODEL_PATH+='/dumb'
if not os.path.isdir(MODEL_PATH):
    os.makedirs(MODEL_PATH)
MODEL_PATH+='/df'+str(SOURCE_DISCOUNT_FACTOR)
if not os.path.isdir(MODEL_PATH):
    os.makedirs(MODEL_PATH)
MODEL_PATH+='/lr'+str(SOURCE_LEARNING_RATE)
if not os.path.isdir(MODEL_PATH):
    os.makedirs(MODEL_PATH)
# Training configuration identifying which pre-trained load agent to load.
load_agent_params = {
    LOAD_RANDOMIZE_BATTERY:True,
    LOAD_LEARNING_RATE: 0.03,
    LOAD_DISCOUNT_FACTOR: 0.9,
    LOAD_NUM_LOADS:999,
    LOAD_DAY:99999,
    LOAD_MODE:'vanilla'
}
# Directory holding the pre-trained load agent, mirroring its training config.
LOAD_MODEL_PATH = os.getcwd()
LOAD_MODEL_PATH += '/basic_qlearning_models'
if load_agent_params[LOAD_RANDOMIZE_BATTERY]:
    LOAD_MODEL_PATH+='/randomize_battery'
else:
    LOAD_MODEL_PATH+='/continuous_battery'
LOAD_MODEL_PATH+= '/dumloads'+str(load_agent_params[LOAD_NUM_LOADS]) +'/df' + str(load_agent_params[LOAD_DISCOUNT_FACTOR]) + '/lr'+str(load_agent_params[LOAD_LEARNING_RATE])
def setup():
    """Build the environment, the source-side Q-table agent and, when
    SOURCE_SMART_LOADS is set, the pre-trained load agent loaded from disk.

    :return: tuple of (environment, {0: source agent}, load agent or None)
    """
    env = Environment()
    load_agent = None
    if SOURCE_SMART_LOADS:
        # Smart loads: restore the pre-trained load agent checkpoint and wire
        # the source to individually controllable loads.
        checkpoint = '{}/{}_agent_{}.pickle'.format(
            LOAD_MODEL_PATH, load_agent_params[LOAD_MODE], load_agent_params[LOAD_DAY])
        with open(checkpoint, 'rb') as f:
            load_agent = pickle.load(f)
        env.add_connections({0: list(range(SOURCE_NUM_LOADS))})
    else:
        # Dumb loads: fixed-behaviour consumers attached to source 0.
        env.add_dumb_loads(0, SOURCE_NUM_LOADS)
    env.set_environment_ready()
    env.reset(True)
    source_agent = QTableAgent(
        env.get_source_action_space(),
        {SOURCE_DEMAND_STATE: env.get_overall_demand_bounds(0)},
        {SOURCE_DEMAND_STATE: 20},
        default_action=1,
        discount_factor=SOURCE_DISCOUNT_FACTOR,
    )
    source_agent.set_learning_rate(SOURCE_LEARNING_RATE)
    return env, {0: source_agent}, load_agent
def train(startday=0, endday=200000):
    """Train the dynamic-pricing source agent with tabular Q-learning.

    For each simulated day the environment is reset and stepped to its final
    timestep while the source agent chooses pricing actions (and, when
    SOURCE_SMART_LOADS is set, the pre-trained load agent chooses one action
    per load). Training stops early once the largest Q-table update within a
    day drops below 1e-3; every 500 days the policy is refreshed and the
    agent is checkpointed to MODEL_PATH.

    Uses the module-level globals ``env``, ``source_agent_dict`` and
    ``load_agent`` created by :func:`setup`.

    :param startday: first episode index (also drives the explore-rate schedule)
    :param endday: one past the last episode index
    :return: wall-clock seconds spent training
    """
    start = time.time()
    load_actions = {}
    for day in range(startday, endday):
        states = []   # per-day trace of visited states (kept for debugging)
        actions = []  # per-day trace of chosen actions (kept for debugging)
        max_change = 0
        max_change_state_action = []
        response = env.reset(True)
        next_state = {SOURCE_DEMAND_STATE: response[0][0][0][0]}
        source_agent_dict[0].update_state(next_state)
        next_action = source_agent_dict[0].take_action()
        for step in range(env.get_max_timestep() + 1):
            current_state = next_state
            current_action = next_action
            actions.append(current_action)
            if SOURCE_SMART_LOADS:
                # Each smart load acts on its battery level and latest price.
                for i in range(SOURCE_NUM_LOADS):
                    load_actions[i] = load_agent.get_action(
                        {LOAD_BATTERY_STATE: response[1][i][0][0], LOAD_PRICE_STATE: response[1][i][0][1][-1]})
            response = env.step(sourceActionDict={0: current_action}, loadActionDict=load_actions)
            next_state = {SOURCE_DEMAND_STATE: response[0][0][0][0]}
            states.append(current_state)
            source_agent_dict[0].update_state(next_state)
            # BUG FIX: string comparison must use '==', not identity ('is') --
            # the original only worked through CPython string interning and
            # raises a SyntaxWarning on modern interpreters.
            if SOURCE_MODE == 'vanilla':
                max_change = max(abs(
                    source_agent_dict[0].update_qtable(
                        current_state=current_state, current_action=current_action,
                        reward=response[0][0][1],
                        mode=SOURCE_MODE, next_state=next_state
                    )), max_change)  # reward should be negative
                next_action = source_agent_dict[0].take_action()
            elif SOURCE_MODE == 'sarsa':
                # SARSA is on-policy: pick the next action first, then update.
                next_action = source_agent_dict[0].take_action()
                max_change = max(abs(
                    source_agent_dict[0].update_qtable(
                        current_state=current_state, current_action=current_action,
                        reward=response[0][0][1],
                        next_state=next_state, next_action=next_action, mode=SOURCE_MODE,
                    )), max_change)
            max_change_state_action = [source_agent_dict[0].state, current_action]
        print(day, ':', source_agent_dict[0].get_explore_rate(day), ':', max_change, ':', max_change_state_action, ':', np.mean(source_agent_dict[0].qtable))
        if max_change < 0.001:
            # Converged: no Q-value moved meaningfully this day.
            break
        source_agent_dict[0].set_explore_rate(source_agent_dict[0].get_explore_rate(day))
        if (day + 1) % 500 == 0:
            source_agent_dict[0].update_policy()
            with open(MODEL_PATH + '/' + SOURCE_MODE + '_agent_' + str(day) + '.pickle', 'wb') as f:
                pickle.dump(source_agent_dict[0], f)
    end = time.time()
    return end - start
# Script entry: build the environment/agents and train for 10,000 days.
# NOTE(review): this runs on import as well, not only as __main__ --
# presumably intentional for this experiment script; confirm before reuse.
env, source_agent_dict, load_agent = setup()
timetaken = train(0,10000)  # wall-clock seconds spent training
| 40.444444 | 173 | 0.627182 | from Environment import Environment
from QTableAgent import QTableAgent
import time, os
import numpy as np
import pickle
from Utils import get_battery_reward_factor
SOURCE_DEMAND_STATE = 'demand'
SOURCE_SMART_LOADS = True
SOURCE_LEARNING_RATE = 0.03
SOURCE_DISCOUNT_FACTOR = 0.95
SOURCE_NUM_LOADS = 10
SOURCE_MODE = 'vanilla'
LOAD_RANDOMIZE_BATTERY = 'rb'
LOAD_MODE = 'mode'
LOAD_DAY = 'day'
LOAD_NUM_LOADS = 'ndl'
LOAD_LEARNING_RATE = 'lr'
LOAD_DISCOUNT_FACTOR = 'df'
LOAD_BATTERY_STATE = 'battery'
LOAD_PRICE_STATE = 'price'
MODEL_PATH = os.getcwd()
MODEL_PATH+='/dynamic_pricing_models'
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
if SOURCE_SMART_LOADS:
MODEL_PATH += '/smart'
else:
MODEL_PATH+='/dumb'
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
MODEL_PATH+='/df'+str(SOURCE_DISCOUNT_FACTOR)
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
MODEL_PATH+='/lr'+str(SOURCE_LEARNING_RATE)
if not os.path.isdir(MODEL_PATH):
os.makedirs(MODEL_PATH)
load_agent_params = {
LOAD_RANDOMIZE_BATTERY:True,
LOAD_LEARNING_RATE: 0.03,
LOAD_DISCOUNT_FACTOR: 0.9,
LOAD_NUM_LOADS:999,
LOAD_DAY:99999,
LOAD_MODE:'vanilla'
}
LOAD_MODEL_PATH = os.getcwd()
LOAD_MODEL_PATH += '/basic_qlearning_models'
if load_agent_params[LOAD_RANDOMIZE_BATTERY]:
LOAD_MODEL_PATH+='/randomize_battery'
else:
LOAD_MODEL_PATH+='/continuous_battery'
LOAD_MODEL_PATH+= '/dumloads'+str(load_agent_params[LOAD_NUM_LOADS]) +'/df' + str(load_agent_params[LOAD_DISCOUNT_FACTOR]) + '/lr'+str(load_agent_params[LOAD_LEARNING_RATE])
def setup():
env = Environment()
load_agent = None
if SOURCE_SMART_LOADS:
with open(
LOAD_MODEL_PATH + '/' + load_agent_params[LOAD_MODE] + '_agent_' + str(load_agent_params[LOAD_DAY]) + '.pickle',
'rb') as f:
load_agent = pickle.load(f)
env.add_connections({0:list(range(SOURCE_NUM_LOADS))})
else:
env.add_dumb_loads(0,SOURCE_NUM_LOADS)
env.set_environment_ready()
env.reset(True)
source_agent_dict = {0:QTableAgent(env.get_source_action_space(),
{SOURCE_DEMAND_STATE:env.get_overall_demand_bounds(0)},
{SOURCE_DEMAND_STATE:20},
default_action=1,
discount_factor=SOURCE_DISCOUNT_FACTOR
)}
source_agent_dict[0].set_learning_rate(SOURCE_LEARNING_RATE)
return env, source_agent_dict, load_agent
def train(startday=0, endday=200000):
start=time.time()
load_actions = {}
for day in range(startday, endday):
states = []
actions = []
max_change = 0
max_change_state_action = []
response = env.reset(True)
next_state = {SOURCE_DEMAND_STATE: response[0][0][0][0]}
source_agent_dict[0].update_state(next_state)
next_action = source_agent_dict[0].take_action()
for step in range(env.get_max_timestep()+1):
current_state = next_state
current_action = next_action
actions.append(current_action)
if SOURCE_SMART_LOADS:
for i in range(SOURCE_NUM_LOADS):
load_actions[i] = load_agent.get_action(
{LOAD_BATTERY_STATE: response[1][i][0][0], LOAD_PRICE_STATE: response[1][i][0][1][-1]})
response = env.step(sourceActionDict={0:current_action}, loadActionDict=load_actions)
next_state = {SOURCE_DEMAND_STATE:response[0][0][0][0]}
states.append(current_state)
source_agent_dict[0].update_state(next_state)
if SOURCE_MODE is 'vanilla':
max_change = max(abs(
source_agent_dict[0].update_qtable(
current_state=current_state, current_action=current_action,
reward = response[0][0][1],
mode=SOURCE_MODE, next_state = next_state
)), max_change)
next_action = source_agent_dict[0].take_action()
elif SOURCE_MODE is 'sarsa':
next_action = source_agent_dict[0].take_action()
max_change = max(abs(
source_agent_dict[0].update_qtable(
current_state=current_state, current_action=current_action,
reward = response[0][0][1],
next_state=next_state, next_action=next_action, mode=SOURCE_MODE,
max_change_state_action = [source_agent_dict[0].state,current_action]
print(day,':',source_agent_dict[0].get_explore_rate(day),':',max_change,':',max_change_state_action,':',np.mean(source_agent_dict[0].qtable))
if max_change<0.001:
break
source_agent_dict[0].set_explore_rate(source_agent_dict[0].get_explore_rate(day))
if (day+1)%500==0:
source_agent_dict[0].update_policy()
with open(MODEL_PATH+'/'+SOURCE_MODE+'_agent_'+str(day)+'.pickle', 'wb') as f:
pickle.dump(source_agent_dict[0], f)
end = time.time()
return end-start
env, source_agent_dict, load_agent = setup()
timetaken = train(0,10000)
| true | true |
1c32cb7103a6a39e5bcb355a0f36e5866914d3b6 | 298,432 | py | Python | src/oci/os_management/os_management_client.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-09-10T22:09:45.000Z | 2021-12-24T17:00:07.000Z | src/oci/os_management/os_management_client.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/os_management/os_management_client.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import os_management_type_mapping
missing = Sentinel("Missing")
class OsManagementClient(object):
"""
API for the OS Management service. Use these API operations for working
with Managed instances and Managed instance groups.
"""
    def __init__(self, config, **kwargs):
        """
        Creates a new service client

        :param dict config:
            Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
            The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate_config
            the dict using :py:meth:`~oci.config.validate_config`

        :param str service_endpoint: (optional)
            The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
            not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
            need to specify a service endpoint.

        :param timeout: (optional)
            The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
            as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
            a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
        :type timeout: float or tuple(float, float)

        :param signer: (optional)
            The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
            provided in the config parameter.

            One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
            by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
        :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

        :param obj retry_strategy: (optional)
            A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
            Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
            Any value provided at the operation level will override whatever is specified at the client level.

            This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
            is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.

        :param obj circuit_breaker_strategy: (optional)
            A circuit breaker strategy to apply to all calls made by this service client (i.e. at the client level).
            This client uses :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY` as default if no circuit breaker strategy is provided.
            The specifics of circuit breaker strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/circuit_breakers.html>`__.

        :param function circuit_breaker_callback: (optional)
            Callback function to receive any exceptions triggerred by the circuit breaker.
        """
        validate_config(config, signer=kwargs.get('signer'))
        # Signer resolution precedence: explicit signer kwarg, then an
        # authentication-type entry in the config, then a key-based Signer
        # built from the standard config fields.
        if 'signer' in kwargs:
            signer = kwargs['signer']
        elif AUTHENTICATION_TYPE_FIELD_NAME in config:
            signer = get_signer_from_authentication_type(config)
        else:
            signer = Signer(
                tenancy=config["tenancy"],
                user=config["user"],
                fingerprint=config["fingerprint"],
                private_key_file_location=config.get("key_file"),
                pass_phrase=get_config_value_or_default(config, "pass_phrase"),
                private_key_content=config.get("key_content")
            )

        # Fixed service metadata (API version path and endpoint template)
        # plus caller-overridable transport settings.
        base_client_init_kwargs = {
            'regional_client': True,
            'service_endpoint': kwargs.get('service_endpoint'),
            'base_path': '/20190801',
            'service_endpoint_template': 'https://osms.{region}.oci.{secondLevelDomain}',
            'skip_deserialization': kwargs.get('skip_deserialization', False),
            'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
        }
        if 'timeout' in kwargs:
            base_client_init_kwargs['timeout'] = kwargs.get('timeout')

        # An explicitly-passed None circuit breaker falls back to the default
        # strategy rather than disabling circuit breaking.
        if base_client_init_kwargs.get('circuit_breaker_strategy') is None:
            base_client_init_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY
        self.base_client = BaseClient("os_management", config, signer, os_management_type_mapping, **base_client_init_kwargs)
        # Client-level retry strategy; individual operations may override it.
        self.retry_strategy = kwargs.get('retry_strategy')
        self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def add_packages_to_software_source(self, software_source_id, add_packages_to_software_source_details, **kwargs):
"""
Adds a given list of Software Packages to a specific Software Source.
:param str software_source_id: (required)
The OCID of the software source.
:param oci.os_management.models.AddPackagesToSoftwareSourceDetails add_packages_to_software_source_details: (required)
A list of package identifiers
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/add_packages_to_software_source.py.html>`__ to see an example of how to use add_packages_to_software_source API.
"""
resource_path = "/softwareSources/{softwareSourceId}/actions/addPackages"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"add_packages_to_software_source got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"softwareSourceId": software_source_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=add_packages_to_software_source_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=add_packages_to_software_source_details)
def attach_child_software_source_to_managed_instance(self, managed_instance_id, attach_child_software_source_to_managed_instance_details, **kwargs):
"""
Adds a child software source to a managed instance. After the software
source has been added, then packages from that software source can be
installed on the managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param oci.os_management.models.AttachChildSoftwareSourceToManagedInstanceDetails attach_child_software_source_to_managed_instance_details: (required)
Details for attaching a Software Source to a Managed Instance
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/attach_child_software_source_to_managed_instance.py.html>`__ to see an example of how to use attach_child_software_source_to_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/attachChildSoftwareSource"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"attach_child_software_source_to_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=attach_child_software_source_to_managed_instance_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=attach_child_software_source_to_managed_instance_details)
def attach_managed_instance_to_managed_instance_group(self, managed_instance_group_id, managed_instance_id, **kwargs):
"""
Adds a Managed Instance to a Managed Instance Group. After the Managed
Instance has been added, then operations can be performed on the Managed
Instance Group which will then apply to all Managed Instances in the
group.
:param str managed_instance_group_id: (required)
OCID for the managed instance group
:param str managed_instance_id: (required)
OCID for the managed instance
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/attach_managed_instance_to_managed_instance_group.py.html>`__ to see an example of how to use attach_managed_instance_to_managed_instance_group API.
"""
resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/attachManagedInstance"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"attach_managed_instance_to_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceGroupId": managed_instance_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"managedInstanceId": managed_instance_id
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def attach_parent_software_source_to_managed_instance(self, managed_instance_id, attach_parent_software_source_to_managed_instance_details, **kwargs):
"""
Adds a parent software source to a managed instance. After the software
source has been added, then packages from that software source can be
installed on the managed instance. Software sources that have this
software source as a parent will be able to be added to this managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param oci.os_management.models.AttachParentSoftwareSourceToManagedInstanceDetails attach_parent_software_source_to_managed_instance_details: (required)
Details for attaching a Software Source to a Managed Instance
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/attach_parent_software_source_to_managed_instance.py.html>`__ to see an example of how to use attach_parent_software_source_to_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/attachParentSoftwareSource"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"attach_parent_software_source_to_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=attach_parent_software_source_to_managed_instance_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=attach_parent_software_source_to_managed_instance_details)
def change_managed_instance_group_compartment(self, managed_instance_group_id, change_managed_instance_group_compartment_details, **kwargs):
    """
    Moves a managed instance group into a different compartment. If an
    `if-match` value is supplied it is compared against the resource's
    current ETag before the move is performed.

    :param str managed_instance_group_id: (required)
        OCID for the managed instance group
    :param oci.os_management.models.ChangeManagedInstanceGroupCompartmentDetails change_managed_instance_group_compartment_details: (required)
        OCID for the compartment to which the resource will be moved.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        ETag value for optimistic concurrency control; the operation only
        proceeds if it matches the resource's current ETag.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/managedInstanceGroups/{managedInstanceGroupId}/actions/changeCompartment"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "if_match", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "change_managed_instance_group_compartment got unknown kwargs: {!r}".format(unknown))

    path_values = {
        "managedInstanceGroupId": managed_instance_group_id
    }
    path_values = {k: v for (k, v) in six.iteritems(path_values) if v is not missing}

    # Every remaining path parameter must be present and non-blank.
    for (name, value) in six.iteritems(path_values):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_values,
            header_params=headers,
            body=change_managed_instance_group_compartment_details)

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_values,
        header_params=headers,
        body=change_managed_instance_group_compartment_details)
def change_scheduled_job_compartment(self, scheduled_job_id, change_scheduled_job_compartment_details, **kwargs):
    """
    Moves a scheduled job into a different compartment. If an `if-match`
    value is supplied it is compared against the resource's current ETag
    before the move is performed.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.
    :param oci.os_management.models.ChangeScheduledJobCompartmentDetails change_scheduled_job_compartment_details: (required)
        OCID for the compartment to which the resource will be moved.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        ETag value for optimistic concurrency control; the operation only
        proceeds if it matches the resource's current ETag.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs/{scheduledJobId}/actions/changeCompartment"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "if_match", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "change_scheduled_job_compartment got unknown kwargs: {!r}".format(unknown))

    path_values = {
        "scheduledJobId": scheduled_job_id
    }
    path_values = {k: v for (k, v) in six.iteritems(path_values) if v is not missing}

    # Every remaining path parameter must be present and non-blank.
    for (name, value) in six.iteritems(path_values):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_values,
            header_params=headers,
            body=change_scheduled_job_compartment_details)

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_values,
        header_params=headers,
        body=change_scheduled_job_compartment_details)
def change_software_source_compartment(self, software_source_id, change_software_source_compartment_details, **kwargs):
    """
    Moves a software source into a different compartment. If an
    `if-match` value is supplied it is compared against the resource's
    current ETag before the move is performed.

    :param str software_source_id: (required)
        The OCID of the software source.
    :param oci.os_management.models.ChangeSoftwareSourceCompartmentDetails change_software_source_compartment_details: (required)
        OCID for the compartment to which the resource will be moved.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        ETag value for optimistic concurrency control; the operation only
        proceeds if it matches the resource's current ETag.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/softwareSources/{softwareSourceId}/actions/changeCompartment"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "if_match", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "change_software_source_compartment got unknown kwargs: {!r}".format(unknown))

    path_values = {
        "softwareSourceId": software_source_id
    }
    path_values = {k: v for (k, v) in six.iteritems(path_values) if v is not missing}

    # Every remaining path parameter must be present and non-blank.
    for (name, value) in six.iteritems(path_values):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_values,
            header_params=headers,
            body=change_software_source_compartment_details)

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_values,
        header_params=headers,
        body=change_software_source_compartment_details)
def create_managed_instance_group(self, create_managed_instance_group_details, **kwargs):
    """
    Creates a new Managed Instance Group on the management system. The
    group contains no managed instances at creation time; instances must
    be attached afterwards.

    :param oci.os_management.models.CreateManagedInstanceGroupDetails create_managed_instance_group_details: (required)
        Details about a Managed Instance Group to create
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.ManagedInstanceGroup`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/managedInstanceGroups"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "create_managed_instance_group got unknown kwargs: {!r}".format(unknown))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            header_params=headers,
            body=create_managed_instance_group_details,
            response_type="ManagedInstanceGroup")

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        header_params=headers,
        body=create_managed_instance_group_details,
        response_type="ManagedInstanceGroup")
def create_scheduled_job(self, create_scheduled_job_details, **kwargs):
    """
    Creates a new Scheduled Job that performs a specific package
    operation on a set of managed instances or managed instance groups.
    The job may be a one-time future execution or a recurring execution
    on a defined interval.

    :param oci.os_management.models.CreateScheduledJobDetails create_scheduled_job_details: (required)
        Details about a Scheduled Job to create
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.ScheduledJob`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "create_scheduled_job got unknown kwargs: {!r}".format(unknown))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            header_params=headers,
            body=create_scheduled_job_details,
            response_type="ScheduledJob")

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        header_params=headers,
        body=create_scheduled_job_details,
        response_type="ScheduledJob")
def create_software_source(self, create_software_source_details, **kwargs):
    """
    Creates a new custom Software Source on the management system. The
    source contains no packages at creation time; packages must be added
    afterwards.

    :param oci.os_management.models.CreateSoftwareSourceDetails create_software_source_details: (required)
        Details about a Sofware Source to create
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error without re-executing the action. Tokens expire after
        24 hours but can be invalidated earlier by conflicting operations.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.SoftwareSource`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/softwareSources"
    http_method = "POST"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "opc_retry_token"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "create_software_source got unknown kwargs: {!r}".format(unknown))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            header_params=headers,
            body=create_software_source_details,
            response_type="SoftwareSource")

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        header_params=headers,
        body=create_software_source_details,
        response_type="SoftwareSource")
def delete_managed_instance_group(self, managed_instance_group_id, **kwargs):
    """
    Deletes a Managed Instance Group from the management system.

    :param str managed_instance_group_id: (required)
        OCID for the managed instance group
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        ETag value for optimistic concurrency control; the delete only
        proceeds if it matches the resource's current ETag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/managedInstanceGroups/{managedInstanceGroupId}"
    http_method = "DELETE"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "if_match"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "delete_managed_instance_group got unknown kwargs: {!r}".format(unknown))

    path_values = {
        "managedInstanceGroupId": managed_instance_group_id
    }
    path_values = {k: v for (k, v) in six.iteritems(path_values) if v is not missing}

    # Every remaining path parameter must be present and non-blank.
    for (name, value) in six.iteritems(path_values):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_values,
            header_params=headers)

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_values,
        header_params=headers)
def delete_scheduled_job(self, scheduled_job_id, **kwargs):
    """
    Cancels an existing Scheduled Job on the management system.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        ETag value for optimistic concurrency control; the delete only
        proceeds if it matches the resource's current ETag.
    :param obj retry_strategy: (optional)
        Per-call retry strategy that overrides any client-level strategy.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries for this call.
    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs/{scheduledJobId}"
    http_method = "DELETE"

    # Reject keyword arguments this operation does not recognize.
    allowed = {"retry_strategy", "opc_request_id", "if_match"}
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "delete_scheduled_job got unknown kwargs: {!r}".format(unknown))

    path_values = {
        "scheduledJobId": scheduled_job_id
    }
    path_values = {k: v for (k, v) in six.iteritems(path_values) if v is not missing}

    # Every remaining path parameter must be present and non-blank.
    for (name, value) in six.iteritems(path_values):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not strategy:
        # No strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_values,
            header_params=headers)

    if not isinstance(strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_values,
        header_params=headers)
def delete_software_source(self, software_source_id, **kwargs):
    """
    Deletes a custom Software Source on the management system

    :param str software_source_id: (required)
        The OCID of the software source.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str if_match: (optional)
        For optimistic concurrency control. In the PUT or DELETE call
        for a resource, set the `if-match` parameter to the value of the
        etag from a previous GET or POST response for that resource.
        The resource will be updated or deleted only if the etag you
        provide matches the resource's current etag value.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/delete_software_source.py.html>`__ to see an example of how to use delete_software_source API.
    """
    resource_path = "/softwareSources/{softwareSourceId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "delete_software_source got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "softwareSourceId": software_source_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def detach_child_software_source_from_managed_instance(self, managed_instance_id, detach_child_software_source_from_managed_instance_details, **kwargs):
    """
    Removes a child software source from a managed instance. Packages will no longer be able to be
    installed from these software sources.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param oci.os_management.models.DetachChildSoftwareSourceFromManagedInstanceDetails detach_child_software_source_from_managed_instance_details: (required)
        Details for detaching a Software Source from a Managed Instance

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again. Retry tokens expire after 24
        hours, but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/detach_child_software_source_from_managed_instance.py.html>`__ to see an example of how to use detach_child_software_source_from_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/actions/detachChildSoftwareSource"
    method = "POST"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "detach_child_software_source_from_managed_instance got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=detach_child_software_source_from_managed_instance_details)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retries on a POST need an idempotency token.
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def detach_managed_instance_from_managed_instance_group(self, managed_instance_group_id, managed_instance_id, **kwargs):
    """
    Removes a Managed Instance from a Managed Instance Group.

    :param str managed_instance_group_id: (required)
        OCID for the managed instance group

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again. Retry tokens expire after 24
        hours, but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/detach_managed_instance_from_managed_instance_group.py.html>`__ to see an example of how to use detach_managed_instance_from_managed_instance_group API.
    """
    resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/detachManagedInstance"
    method = "POST"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "detach_managed_instance_from_managed_instance_group got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "managedInstanceGroupId": managed_instance_group_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    # The instance to detach is passed as a query parameter.
    query_params = {
        "managedInstanceId": managed_instance_id
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retries on a POST need an idempotency token.
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def detach_parent_software_source_from_managed_instance(self, managed_instance_id, detach_parent_software_source_from_managed_instance_details, **kwargs):
    """
    Removes a software source from a managed instance. All child software sources will also be removed
    from the managed instance. Packages will no longer be able to be installed from these software sources.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param oci.os_management.models.DetachParentSoftwareSourceFromManagedInstanceDetails detach_parent_software_source_from_managed_instance_details: (required)
        Details for detaching a Software Source from a Managed Instance

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again. Retry tokens expire after 24
        hours, but can be invalidated before then due to conflicting operations. For example, if a resource
        has been deleted and purged from the system, then a retry of the original creation request
        might be rejected.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/detach_parent_software_source_from_managed_instance.py.html>`__ to see an example of how to use detach_parent_software_source_from_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/actions/detachParentSoftwareSource"
    method = "POST"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "detach_parent_software_source_from_managed_instance got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=detach_parent_software_source_from_managed_instance_details)

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            # Retries on a POST need an idempotency token.
            self.base_client.add_opc_retry_token_if_needed(header_params)
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_erratum(self, erratum_id, **kwargs):
    """
    Returns a specific erratum.

    :param str erratum_id: (required)
        The OCID of the erratum.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.Erratum`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_erratum.py.html>`__ to see an example of how to use get_erratum API.
    """
    resource_path = "/errata/{erratumId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_erratum got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "erratumId": erratum_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="Erratum")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_managed_instance(self, managed_instance_id, **kwargs):
    """
    Returns a specific Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.ManagedInstance`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_managed_instance.py.html>`__ to see an example of how to use get_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_managed_instance got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="ManagedInstance")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_managed_instance_group(self, managed_instance_group_id, **kwargs):
    """
    Returns a specific Managed Instance Group.

    :param str managed_instance_group_id: (required)
        OCID for the managed instance group

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.ManagedInstanceGroup`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_managed_instance_group.py.html>`__ to see an example of how to use get_managed_instance_group API.
    """
    resource_path = "/managedInstanceGroups/{managedInstanceGroupId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_managed_instance_group got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "managedInstanceGroupId": managed_instance_group_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="ManagedInstanceGroup")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_scheduled_job(self, scheduled_job_id, **kwargs):
    """
    Gets the detailed information for the Scheduled Job with the given ID.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.ScheduledJob`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_scheduled_job.py.html>`__ to see an example of how to use get_scheduled_job API.
    """
    resource_path = "/scheduledJobs/{scheduledJobId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_scheduled_job got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate the templated path parameter.
    path_params = {
        "scheduledJobId": scheduled_job_id
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="ScheduledJob")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_software_package(self, software_source_id, software_package_name, **kwargs):
    """
    Returns a specific Software Package.

    :param str software_source_id: (required)
        The OCID of the software source.

    :param str software_package_name: (required)
        The id of the software package.

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding
        any client-level strategy. Must be one of the strategies from the
        :py:mod:`~oci.retry` module; this operation does not retry by default.
        Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to
        explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.SoftwarePackage`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_software_package.py.html>`__ to see an example of how to use get_software_package API.
    """
    resource_path = "/softwareSources/{softwareSourceId}/softwarePackages/{softwarePackageName}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "get_software_package got unknown kwargs: {!r}".format(unknown))

    # Substitute and validate both templated path parameters.
    path_params = {
        "softwareSourceId": software_source_id,
        "softwarePackageName": software_package_name
    }
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop headers the caller did not supply.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and non-retrying call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="SoftwarePackage")

    if retry_strategy:
        if not isinstance(retry_strategy, retry.NoneRetryStrategy):
            self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
        return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
    return self.base_client.call_api(**call_kwargs)
def get_software_source(self, software_source_id, **kwargs):
"""
Returns a specific Software Source.
:param str software_source_id: (required)
The OCID of the software source.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.SoftwareSource`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_software_source.py.html>`__ to see an example of how to use get_software_source API.
"""
resource_path = "/softwareSources/{softwareSourceId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_software_source got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"softwareSourceId": software_source_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwareSource")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwareSource")
def get_windows_update(self, windows_update, **kwargs):
"""
Returns a Windows Update object.
:param str windows_update: (required)
The Windows Update
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.WindowsUpdate`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_windows_update.py.html>`__ to see an example of how to use get_windows_update API.
"""
resource_path = "/updates/{windowsUpdate}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_windows_update got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"windowsUpdate": windows_update
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WindowsUpdate")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WindowsUpdate")
def get_work_request(self, work_request_id, **kwargs):
"""
Gets the detailed information for the work request with the given ID.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.os_management.models.WorkRequest`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/get_work_request.py.html>`__ to see an example of how to use get_work_request API.
"""
resource_path = "/workRequests/{workRequestId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
def install_all_package_updates_on_managed_instance(self, managed_instance_id, **kwargs):
"""
Install all of the available package updates for the managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str update_type: (optional)
The type of updates to be applied
Allowed values are: "SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_all_package_updates_on_managed_instance.py.html>`__ to see an example of how to use install_all_package_updates_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/updateAll"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_package_updates_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_all_updates_on_managed_instance_group(self, managed_instance_group_id, **kwargs):
"""
Install all of the available updates for the Managed Instance Group.
:param str managed_instance_group_id: (required)
OCID for the managed instance group
:param str update_type: (optional)
The type of updates to be applied
Allowed values are: "SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_all_updates_on_managed_instance_group.py.html>`__ to see an example of how to use install_all_updates_on_managed_instance_group API.
"""
resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/updates/installAll"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_updates_on_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceGroupId": managed_instance_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_all_windows_updates_on_managed_instance(self, managed_instance_id, **kwargs):
"""
Install all of the available Windows updates for the managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str update_type: (optional)
The type of updates to be applied
Allowed values are: "SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_all_windows_updates_on_managed_instance.py.html>`__ to see an example of how to use install_all_windows_updates_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/updates/installAll"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_windows_updates_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_package_on_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
"""
Installs a package on a managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str software_package_name: (required)
Package name
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_package_on_managed_instance.py.html>`__ to see an example of how to use install_package_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/install"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_package_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"softwarePackageName": software_package_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_package_update_on_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
"""
Updates a package on a managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str software_package_name: (required)
Package name
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_package_update_on_managed_instance.py.html>`__ to see an example of how to use install_package_update_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/update"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_package_update_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"softwarePackageName": software_package_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_windows_update_on_managed_instance(self, managed_instance_id, windows_update_name, **kwargs):
"""
Installs a Windows update on a managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str windows_update_name: (required)
Unique identifier for the Windows update. NOTE - This is not an OCID,
but is a unique identifier assigned by Microsoft.
Example: `6981d463-cd91-4a26-b7c4-ea4ded9183ed`
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/install_windows_update_on_managed_instance.py.html>`__ to see an example of how to use install_windows_update_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/updates/install"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_windows_update_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"windowsUpdateName": windows_update_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def list_available_packages_for_managed_instance(self, managed_instance_id, **kwargs):
    """
    Returns a list of packages available for install on the Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
        Example: `My new resource`

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
        Allowed values are: "TIMECREATED", "DISPLAYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.InstallablePackageSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_available_packages_for_managed_instance.py.html>`__ to see an example of how to use list_available_packages_for_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/packages/available"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "display_name", "compartment_id", "limit",
        "page", "sort_order", "sort_by", "opc_request_id"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_available_packages_for_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank.
    path_params = {k: v for (k, v) in six.iteritems({"managedInstanceId": managed_instance_id}) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[InstallablePackageSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_available_software_sources_for_managed_instance(self, managed_instance_id, **kwargs):
    """
    Returns a list of available software sources for a Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
        Example: `My new resource`

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
        Allowed values are: "TIMECREATED", "DISPLAYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.AvailableSoftwareSourceSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_available_software_sources_for_managed_instance.py.html>`__ to see an example of how to use list_available_software_sources_for_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/availableSoftwareSources"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "display_name", "compartment_id", "limit",
        "page", "sort_order", "sort_by", "opc_request_id"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_available_software_sources_for_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank.
    path_params = {k: v for (k, v) in six.iteritems({"managedInstanceId": managed_instance_id}) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[AvailableSoftwareSourceSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_available_updates_for_managed_instance(self, managed_instance_id, **kwargs):
    """
    Returns a list of available updates for a Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
        Example: `My new resource`

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
        Allowed values are: "TIMECREATED", "DISPLAYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.AvailableUpdateSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_available_updates_for_managed_instance.py.html>`__ to see an example of how to use list_available_updates_for_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/packages/updates"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "display_name", "compartment_id", "limit",
        "page", "sort_order", "sort_by", "opc_request_id"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_available_updates_for_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank.
    path_params = {k: v for (k, v) in six.iteritems({"managedInstanceId": managed_instance_id}) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[AvailableUpdateSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_available_windows_updates_for_managed_instance(self, managed_instance_id, **kwargs):
    """
    Returns a list of available Windows updates for a Managed Instance. This is only applicable to Windows instances.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
        Example: `My new resource`

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
        Allowed values are: "TIMECREATED", "DISPLAYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param str is_eligible_for_installation: (optional)
        Indicator of whether the update can be installed using OSMS.
        Allowed values are: "INSTALLABLE", "NOT_INSTALLABLE", "UNKNOWN"

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.AvailableWindowsUpdateSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_available_windows_updates_for_managed_instance.py.html>`__ to see an example of how to use list_available_windows_updates_for_managed_instance API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/updates/available"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "display_name", "compartment_id", "limit",
        "page", "sort_order", "sort_by", "opc_request_id",
        "is_eligible_for_installation"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_available_windows_updates_for_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank.
    path_params = {k: v for (k, v) in six.iteritems({"managedInstanceId": managed_instance_id}) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["TIMECREATED", "DISPLAYNAME"]),
                                   ("is_eligible_for_installation", ["INSTALLABLE", "NOT_INSTALLABLE", "UNKNOWN"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "isEligibleForInstallation": kwargs.get("is_eligible_for_installation", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[AvailableWindowsUpdateSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_errata(self, **kwargs):
    """
    Returns a list of all of the currently available Errata in the system

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param str erratum_id: (optional)
        The OCID of the erratum.

    :param str advisory_name: (optional)
        The assigned erratum name. It's unique and not changeable.
        Example: `ELSA-2020-5804`

    :param datetime time_issue_date_start: (optional)
        The issue date after which to list all errata, in ISO 8601 format
        Example: 2017-07-14T02:40:00.000Z

    :param datetime time_issue_date_end: (optional)
        The issue date before which to list all errata, in ISO 8601 format
        Example: 2017-07-14T02:40:00.000Z

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort errata by. Only one sort order may be provided. Default order for ISSUEDATE is descending. Default order for ADVISORYNAME is ascending. If no value is specified ISSUEDATE is default.
        Allowed values are: "ISSUEDATE", "ADVISORYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ErratumSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_errata.py.html>`__ to see an example of how to use list_errata API.
    """
    resource_path = "/errata"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "compartment_id", "erratum_id", "advisory_name",
        "time_issue_date_start", "time_issue_date_end", "limit", "page",
        "sort_order", "sort_by", "opc_request_id"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_errata got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["ISSUEDATE", "ADVISORYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "compartmentId": kwargs.get("compartment_id", missing),
        "erratumId": kwargs.get("erratum_id", missing),
        "advisoryName": kwargs.get("advisory_name", missing),
        "timeIssueDateStart": kwargs.get("time_issue_date_start", missing),
        "timeIssueDateEnd": kwargs.get("time_issue_date_end", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    # Note: this operation has no path parameters.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[ErratumSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_managed_instance_errata(self, managed_instance_id, **kwargs):
    """
    Returns a list of errata relevant to the Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance

    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
        Example: `My new resource`

    :param str compartment_id: (optional)
        The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

    :param int limit: (optional)
        The maximum number of items to return.

    :param str page: (optional)
        The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

    :param str sort_order: (optional)
        The sort order to use, either 'asc' or 'desc'.
        Allowed values are: "ASC", "DESC"

    :param str sort_by: (optional)
        The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
        Allowed values are: "TIMECREATED", "DISPLAYNAME"

    :param str opc_request_id: (optional)
        The client request ID for tracing.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
        This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
        The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
        To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

    :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ErratumSummary`
    :rtype: :class:`~oci.response.Response`

    :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_managed_instance_errata.py.html>`__ to see an example of how to use list_managed_instance_errata API.
    """
    resource_path = "/managedInstances/{managedInstanceId}/errata"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed_kwargs = {
        "retry_strategy", "display_name", "compartment_id", "limit",
        "page", "sort_order", "sort_by", "opc_request_id"
    }
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unrecognized:
        raise ValueError(
            "list_managed_instance_errata got unknown kwargs: {!r}".format(unrecognized))

    # Path parameters must be present and non-blank.
    path_params = {k: v for (k, v) in six.iteritems({"managedInstanceId": managed_instance_id}) if v is not missing}
    for name, value in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and not value.strip()):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    # Validate enum-valued kwargs against their documented allowed values.
    for enum_name, enum_values in (("sort_order", ["ASC", "DESC"]),
                                   ("sort_by", ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in enum_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, enum_values))

    # Build the query string, dropping parameters the caller did not supply.
    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Arguments shared by the retrying and the direct call paths.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[ErratumSummary]")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Mutates header_params in place, so call_api sees the extra headers.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_managed_instance_groups(self, compartment_id, **kwargs):
"""
Returns a list of all Managed Instance Groups.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str lifecycle_state: (optional)
The current lifecycle state for the object.
Allowed values are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"
:param str os_family: (optional)
The OS family for which to list resources.
Allowed values are: "LINUX", "WINDOWS", "ALL"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ManagedInstanceGroupSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_managed_instance_groups.py.html>`__ to see an example of how to use list_managed_instance_groups API.
"""
resource_path = "/managedInstanceGroups"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id",
"lifecycle_state",
"os_family"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_managed_instance_groups got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
if 'os_family' in kwargs:
os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
if kwargs['os_family'] not in os_family_allowed_values:
raise ValueError(
"Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"osFamily": kwargs.get("os_family", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ManagedInstanceGroupSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ManagedInstanceGroupSummary]")
def list_managed_instances(self, compartment_id, **kwargs):
"""
Returns a list of all Managed Instances.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str os_family: (optional)
The OS family for which to list resources.
Allowed values are: "LINUX", "WINDOWS", "ALL"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ManagedInstanceSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_managed_instances.py.html>`__ to see an example of how to use list_managed_instances API.
"""
resource_path = "/managedInstances"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id",
"os_family"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_managed_instances got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'os_family' in kwargs:
os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
if kwargs['os_family'] not in os_family_allowed_values:
raise ValueError(
"Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"osFamily": kwargs.get("os_family", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ManagedInstanceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ManagedInstanceSummary]")
    def list_packages_installed_on_managed_instance(self, managed_instance_id, **kwargs):
        """
        Returns a list of installed packages on the Managed Instance.

        :param str managed_instance_id: (required)
            OCID for the managed instance

        :param str display_name: (optional)
            A user-friendly name. Does not have to be unique, and it's changeable.
            Example: `My new resource`

        :param str compartment_id: (optional)
            The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

        :param int limit: (optional)
            The maximum number of items to return.

        :param str page: (optional)
            The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

        :param str sort_order: (optional)
            The sort order to use, either 'asc' or 'desc'.
            Allowed values are: "ASC", "DESC"

        :param str sort_by: (optional)
            The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
            Allowed values are: "TIMECREATED", "DISPLAYNAME"

        :param str opc_request_id: (optional)
            The client request ID for tracing.

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
            The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.InstalledPackageSummary`
        :rtype: :class:`~oci.response.Response`

        :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_packages_installed_on_managed_instance.py.html>`__ to see an example of how to use list_packages_installed_on_managed_instance API.
        """
        resource_path = "/managedInstances/{managedInstanceId}/packages"
        method = "GET"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "compartment_id",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_packages_installed_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "managedInstanceId": managed_instance_id
        }

        # Drop unset path params, then reject any that are None or blank —
        # substituting them into the URL template would produce a malformed path.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        # Validate enum-restricted kwargs before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )

        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )

        # Map python kwarg names to their wire (camelCase) query-parameter names;
        # entries left as `missing` (or None) are filtered out below.
        query_params = {
            "displayName": kwargs.get("display_name", missing),
            "compartmentId": kwargs.get("compartment_id", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # Operation-level retry strategy (kwarg) wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )

        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[InstalledPackageSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[InstalledPackageSummary]")
    def list_scheduled_jobs(self, compartment_id, **kwargs):
        """
        Returns a list of all of the currently active Scheduled Jobs in the system

        :param str compartment_id: (required)
            The ID of the compartment in which to list resources.

        :param str display_name: (optional)
            A user-friendly name. Does not have to be unique, and it's changeable.
            Example: `My new resource`

        :param str managed_instance_id: (optional)
            The ID of the managed instance for which to list resources.

        :param str managed_instance_group_id: (optional)
            The ID of the managed instance group for which to list resources.

        :param str operation_type: (optional)
            The operation type for which to list resources
            Allowed values are: "INSTALL", "UPDATE", "REMOVE", "UPDATEALL"

        :param int limit: (optional)
            The maximum number of items to return.

        :param str page: (optional)
            The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

        :param str sort_order: (optional)
            The sort order to use, either 'asc' or 'desc'.
            Allowed values are: "ASC", "DESC"

        :param str sort_by: (optional)
            The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
            Allowed values are: "TIMECREATED", "DISPLAYNAME"

        :param str lifecycle_state: (optional)
            The current lifecycle state for the object.
            Allowed values are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"

        :param str opc_request_id: (optional)
            The client request ID for tracing.

        :param str os_family: (optional)
            The OS family for which to list resources.
            Allowed values are: "LINUX", "WINDOWS", "ALL"

        :param bool is_restricted: (optional)
            If true, will only filter out restricted Autonomous Linux Scheduled Job

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
            The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ScheduledJobSummary`
        :rtype: :class:`~oci.response.Response`

        :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_scheduled_jobs.py.html>`__ to see an example of how to use list_scheduled_jobs API.
        """
        resource_path = "/scheduledJobs"
        method = "GET"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "managed_instance_id",
            "managed_instance_group_id",
            "operation_type",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "lifecycle_state",
            "opc_request_id",
            "os_family",
            "is_restricted"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_scheduled_jobs got unknown kwargs: {!r}".format(extra_kwargs))

        # Validate each enum-restricted kwarg against its allowed values before
        # building the request.
        if 'operation_type' in kwargs:
            operation_type_allowed_values = ["INSTALL", "UPDATE", "REMOVE", "UPDATEALL"]
            if kwargs['operation_type'] not in operation_type_allowed_values:
                raise ValueError(
                    "Invalid value for `operation_type`, must be one of {0}".format(operation_type_allowed_values)
                )

        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )

        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )

        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )

        if 'os_family' in kwargs:
            os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
            if kwargs['os_family'] not in os_family_allowed_values:
                raise ValueError(
                    "Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
                )

        # Map python kwarg names to their wire (camelCase) query-parameter names;
        # entries left as `missing` (or None) are filtered out below.
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "managedInstanceId": kwargs.get("managed_instance_id", missing),
            "managedInstanceGroupId": kwargs.get("managed_instance_group_id", missing),
            "operationType": kwargs.get("operation_type", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "lifecycleState": kwargs.get("lifecycle_state", missing),
            "osFamily": kwargs.get("os_family", missing),
            "isRestricted": kwargs.get("is_restricted", missing)
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # Operation-level retry strategy (kwarg) wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )

        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
    def list_software_source_packages(self, software_source_id, **kwargs):
        """
        Lists Software Packages in a Software Source

        :param str software_source_id: (required)
            The OCID of the software source.

        :param str compartment_id: (optional)
            The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.

        :param str display_name: (optional)
            A user-friendly name. Does not have to be unique, and it's changeable.
            Example: `My new resource`

        :param int limit: (optional)
            The maximum number of items to return.

        :param str page: (optional)
            The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.

        :param str sort_order: (optional)
            The sort order to use, either 'asc' or 'desc'.
            Allowed values are: "ASC", "DESC"

        :param str sort_by: (optional)
            The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
            Allowed values are: "TIMECREATED", "DISPLAYNAME"

        :param str opc_request_id: (optional)
            The client request ID for tracing.

        :param obj retry_strategy: (optional)
            A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
            This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
            The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
            To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.

        :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.SoftwarePackageSummary`
        :rtype: :class:`~oci.response.Response`

        :example:
        Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_software_source_packages.py.html>`__ to see an example of how to use list_software_source_packages API.
        """
        resource_path = "/softwareSources/{softwareSourceId}/softwarePackages"
        method = "GET"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "compartment_id",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_software_source_packages got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "softwareSourceId": software_source_id
        }

        # Drop unset path params, then reject any that are None or blank —
        # substituting them into the URL template would produce a malformed path.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        # Validate enum-restricted kwargs before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )

        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )

        # Map python kwarg names to their wire (camelCase) query-parameter names;
        # entries left as `missing` (or None) are filtered out below.
        query_params = {
            "compartmentId": kwargs.get("compartment_id", missing),
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # Operation-level retry strategy (kwarg) wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )

        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwarePackageSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwarePackageSummary]")
def list_software_sources(self, compartment_id, **kwargs):
"""
Returns a list of all Software Sources.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str lifecycle_state: (optional)
The current lifecycle state for the object.
Allowed values are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.SoftwareSourceSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_software_sources.py.html>`__ to see an example of how to use list_software_sources API.
"""
resource_path = "/softwareSources"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"limit",
"page",
"sort_order",
"sort_by",
"lifecycle_state",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_software_sources got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[SoftwareSourceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[SoftwareSourceSummary]")
def list_upcoming_scheduled_jobs(self, compartment_id, time_end, **kwargs):
"""
Returns a list of all of the Scheduled Jobs whose next execution time is at or before the specified time.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param datetime time_end: (required)
The cut-off time before which to list all upcoming schedules, in ISO 8601 format
Example: 2017-07-14T02:40:00.000Z
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str tag_name: (optional)
The name of the tag.
:param str tag_value: (optional)
The value for the tag.
:param str lifecycle_state: (optional)
The current lifecycle state for the object.
Allowed values are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str os_family: (optional)
The OS family for which to list resources.
Allowed values are: "LINUX", "WINDOWS", "ALL"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.ScheduledJobSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_upcoming_scheduled_jobs.py.html>`__ to see an example of how to use list_upcoming_scheduled_jobs API.
"""
resource_path = "/scheduledJobs/upcomingSchedules"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"limit",
"page",
"sort_order",
"sort_by",
"tag_name",
"tag_value",
"lifecycle_state",
"opc_request_id",
"os_family"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_upcoming_scheduled_jobs got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
if 'os_family' in kwargs:
os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
if kwargs['os_family'] not in os_family_allowed_values:
raise ValueError(
"Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"timeEnd": time_end,
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"tagName": kwargs.get("tag_name", missing),
"tagValue": kwargs.get("tag_value", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"osFamily": kwargs.get("os_family", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ScheduledJobSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[ScheduledJobSummary]")
def list_windows_updates(self, **kwargs):
"""
Returns a list of Windows Updates.
:param str compartment_id: (optional)
The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.WindowsUpdateSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_windows_updates.py.html>`__ to see an example of how to use list_windows_updates API.
"""
resource_path = "/updates"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"compartment_id",
"display_name",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_windows_updates got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": kwargs.get("compartment_id", missing),
"displayName": kwargs.get("display_name", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WindowsUpdateSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WindowsUpdateSummary]")
def list_windows_updates_installed_on_managed_instance(self, managed_instance_id, **kwargs):
"""
Returns a list of installed Windows updates for a Managed Instance. This is only applicable to Windows instances.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param str compartment_id: (optional)
The ID of the compartment in which to list resources. This parameter is optional and in some cases may have no effect.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.InstalledWindowsUpdateSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_windows_updates_installed_on_managed_instance.py.html>`__ to see an example of how to use list_windows_updates_installed_on_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/updates/installed"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"compartment_id",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_windows_updates_installed_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"compartmentId": kwargs.get("compartment_id", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[InstalledWindowsUpdateSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[InstalledWindowsUpdateSummary]")
def list_work_request_errors(self, work_request_id, **kwargs):
"""
Gets the errors for the work request with the given ID.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.WorkRequestError`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_work_request_errors.py.html>`__ to see an example of how to use list_work_request_errors API.
"""
resource_path = "/workRequests/{workRequestId}/errors"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
def list_work_request_logs(self, work_request_id, **kwargs):
"""
Lists the log entries for the work request with the given ID.
:param str work_request_id: (required)
The ID of the asynchronous request.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.WorkRequestLogEntry`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_work_request_logs.py.html>`__ to see an example of how to use list_work_request_logs API.
"""
resource_path = "/workRequests/{workRequestId}/logs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
def list_work_requests(self, compartment_id, **kwargs):
"""
Lists the work requests in a compartment.
:param str compartment_id: (required)
The ID of the compartment in which to list resources.
:param str display_name: (optional)
A user-friendly name. Does not have to be unique, and it's changeable.
Example: `My new resource`
:param str managed_instance_id: (optional)
The ID of the managed instance for which to list resources.
:param int limit: (optional)
The maximum number of items to return.
:param str page: (optional)
The page token representing the page at which to start retrieving results. This is usually retrieved from a previous list call.
:param str sort_order: (optional)
The sort order to use, either 'asc' or 'desc'.
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
The field to sort by. Only one sort order may be provided. Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. If no value is specified TIMECREATED is default.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str os_family: (optional)
The OS family for which to list resources.
Allowed values are: "LINUX", "WINDOWS", "ALL"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.os_management.models.WorkRequestSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
"""
resource_path = "/workRequests"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"managed_instance_id",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id",
"os_family"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'os_family' in kwargs:
os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
if kwargs['os_family'] not in os_family_allowed_values:
raise ValueError(
"Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"managedInstanceId": kwargs.get("managed_instance_id", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing),
"osFamily": kwargs.get("os_family", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
def remove_package_from_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
"""
Removes an installed package from a managed instance.
:param str managed_instance_id: (required)
OCID for the managed instance
:param str software_package_name: (required)
Package name
:param str opc_request_id: (optional)
The client request ID for tracing.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so it can be retried in case of a timeout or
server error without risk of executing that same action again. Retry tokens expire after 24
hours, but can be invalidated before then due to conflicting operations. For example, if a resource
has been deleted and purged from the system, then a retry of the original creation request
might be rejected.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it.
The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/osmanagement/remove_package_from_managed_instance.py.html>`__ to see an example of how to use remove_package_from_managed_instance API.
"""
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/remove"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"remove_package_from_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"softwarePackageName": software_package_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def remove_packages_from_software_source(self, software_source_id, remove_packages_from_software_source_details, **kwargs):
    """
    Removes a given list of Software Packages from a specific Software Source.

    :param str software_source_id: (required)
        The OCID of the software source.
    :param oci.os_management.models.RemovePackagesFromSoftwareSourceDetails remove_packages_from_software_source_details: (required)
        A list of package identifiers.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/softwareSources/{softwareSourceId}/actions/removePackages"
    http_method = "POST"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "remove_packages_from_software_source got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"softwareSourceId": software_source_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params,
            body=remove_packages_from_software_source_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=remove_packages_from_software_source_details)
def run_scheduled_job_now(self, scheduled_job_id, **kwargs):
    """
    This will trigger an already created Scheduled Job to begin executing
    immediately instead of waiting for its next regularly scheduled time.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is acted on only if
        the supplied etag matches its current etag value.
    :param str opc_retry_token: (optional)
        A token uniquely identifying this request so it can be safely retried
        after a timeout or server error without re-executing the action.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs/{scheduledJobId}/actions/runNow"
    http_method = "POST"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "run_scheduled_job_now got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"scheduledJobId": scheduled_job_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # A retrying call needs an opc-retry-token so repeats are idempotent.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params)
def search_software_packages(self, **kwargs):
    """
    Searches all of the available Software Sources and returns any/all Software
    Packages matching the search criteria.

    :param str software_package_name: (optional)
        The identifier for the software package (not an OCID).
    :param str display_name: (optional)
        A user-friendly name. Does not have to be unique, and it's changeable.
    :param str cve_name: (optional)
        The name of the CVE as published, e.g. `CVE-2006-4535`.
    :param int limit: (optional)
        The maximum number of items to return.
    :param str page: (optional)
        The page token at which to start retrieving results, usually taken
        from a previous list call.
    :param str sort_order: (optional)
        Either "ASC" or "DESC".
    :param str sort_by: (optional)
        Either "TIMECREATED" (default, descending) or "DISPLAYNAME" (ascending).
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type list of
        :class:`~oci.os_management.models.SoftwarePackageSearchSummary`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/softwareSources/softwarePackages"
    http_method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "software_package_name",
        "display_name",
        "cve_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "search_software_packages got unknown kwargs: {!r}".format(unknown))

    # Validate the enum-style query arguments before building the request.
    if 'sort_order' in kwargs:
        valid_sort_orders = ["ASC", "DESC"]
        if kwargs['sort_order'] not in valid_sort_orders:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(valid_sort_orders)
            )

    if 'sort_by' in kwargs:
        valid_sort_fields = ["TIMECREATED", "DISPLAYNAME"]
        if kwargs['sort_by'] not in valid_sort_fields:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(valid_sort_fields)
            )

    query_params = {
        "softwarePackageName": kwargs.get("software_package_name", missing),
        "displayName": kwargs.get("display_name", missing),
        "cveName": kwargs.get("cve_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {name: value for (name, value) in six.iteritems(query_params) if value is not missing and value is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[SoftwarePackageSearchSummary]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[SoftwarePackageSearchSummary]")
def skip_next_scheduled_job_execution(self, scheduled_job_id, **kwargs):
    """
    This will force an already created Scheduled Job to skip its next regularly
    scheduled execution.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is acted on only if
        the supplied etag matches its current etag value.
    :param str opc_retry_token: (optional)
        A token uniquely identifying this request so it can be safely retried
        after a timeout or server error without re-executing the action.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs/{scheduledJobId}/actions/skipNextExecution"
    http_method = "POST"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "skip_next_scheduled_job_execution got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"scheduledJobId": scheduled_job_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # A retrying call needs an opc-retry-token so repeats are idempotent.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params)
def update_managed_instance(self, managed_instance_id, update_managed_instance_details, **kwargs):
    """
    Updates a specific Managed Instance.

    :param str managed_instance_id: (required)
        OCID for the managed instance.
    :param oci.os_management.models.UpdateManagedInstanceDetails update_managed_instance_details: (required)
        Details about a Managed Instance to update.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is updated only if
        the supplied etag matches its current etag value.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.os_management.models.ManagedInstance`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/managedInstances/{managedInstanceId}"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_managed_instance got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"managedInstanceId": managed_instance_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params,
            body=update_managed_instance_details,
            response_type="ManagedInstance")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_managed_instance_details,
        response_type="ManagedInstance")
def update_managed_instance_group(self, managed_instance_group_id, update_managed_instance_group_details, **kwargs):
    """
    Updates a specific Managed Instance Group.

    :param str managed_instance_group_id: (required)
        OCID for the managed instance group.
    :param oci.os_management.models.UpdateManagedInstanceGroupDetails update_managed_instance_group_details: (required)
        Details about a Managed Instance Group to update.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is updated only if
        the supplied etag matches its current etag value.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.os_management.models.ManagedInstanceGroup`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/managedInstanceGroups/{managedInstanceGroupId}"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_managed_instance_group got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"managedInstanceGroupId": managed_instance_group_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params,
            body=update_managed_instance_group_details,
            response_type="ManagedInstanceGroup")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_managed_instance_group_details,
        response_type="ManagedInstanceGroup")
def update_scheduled_job(self, scheduled_job_id, update_scheduled_job_details, **kwargs):
    """
    Updates an existing Scheduled Job on the management system.

    :param str scheduled_job_id: (required)
        The ID of the scheduled job.
    :param oci.os_management.models.UpdateScheduledJobDetails update_scheduled_job_details: (required)
        Details about a Scheduled Job to update.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is updated only if
        the supplied etag matches its current etag value.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.os_management.models.ScheduledJob`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/scheduledJobs/{scheduledJobId}"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_scheduled_job got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"scheduledJobId": scheduled_job_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params,
            body=update_scheduled_job_details,
            response_type="ScheduledJob")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_scheduled_job_details,
        response_type="ScheduledJob")
def update_software_source(self, software_source_id, update_software_source_details, **kwargs):
    """
    Updates an existing custom Software Source on the management system.

    :param str software_source_id: (required)
        The OCID of the software source.
    :param oci.os_management.models.UpdateSoftwareSourceDetails update_software_source_details: (required)
        Details about a Sofware Source to update.
    :param str opc_request_id: (optional)
        The client request ID for tracing.
    :param str if_match: (optional)
        For optimistic concurrency control: the resource is updated only if
        the supplied etag matches its current etag value.
    :param obj retry_strategy: (optional)
        A retry strategy applied to this call only, overriding any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.os_management.models.SoftwareSource`
    :rtype: :class:`~oci.response.Response`
    """
    endpoint = "/softwareSources/{softwareSourceId}"
    http_method = "PUT"

    # Reject keyword arguments this operation does not understand.
    allowed_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "update_software_source got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; every supplied value must be a non-blank string.
    path_params = {"softwareSourceId": software_source_id}
    path_params = {name: value for (name, value) in six.iteritems(path_params) if value is not missing}
    for (name, value) in six.iteritems(path_params):
        if value is None or (isinstance(value, six.string_types) and len(value.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = {name: value for (name, value) in six.iteritems(header_params) if value is not missing and value is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the request directly.
        return self.base_client.call_api(
            resource_path=endpoint,
            method=http_method,
            path_params=path_params,
            header_params=header_params,
            body=update_software_source_details,
            response_type="SoftwareSource")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=endpoint,
        method=http_method,
        path_params=path_params,
        header_params=header_params,
        body=update_software_source_details,
        response_type="SoftwareSource")
| 48.336897 | 262 | 0.647632 |
from __future__ import absolute_import
from oci._vendor import requests
from oci._vendor import six
from oci import retry, circuit_breaker
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import os_management_type_mapping
missing = Sentinel("Missing")
class OsManagementClient(object):
def __init__(self, config, **kwargs):
    """
    Creates a new service client for the OS Management service.

    :param dict config: Configuration keys and values per the SDK and Tool
        Configuration format; validated before any other setup is done.
    :param kwargs: Optional overrides — 'signer', 'service_endpoint',
        'timeout', 'skip_deserialization', 'retry_strategy',
        'circuit_breaker_strategy', 'circuit_breaker_callback'.
    """
    validate_config(config, signer=kwargs.get('signer'))

    # Signer precedence: an explicit 'signer' kwarg wins, then an
    # authentication-type-driven signer from the config, and finally a
    # plain API-key Signer assembled from the individual config values.
    if 'signer' in kwargs:
        signer = kwargs['signer']
    elif AUTHENTICATION_TYPE_FIELD_NAME in config:
        signer = get_signer_from_authentication_type(config)
    else:
        signer = Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )

    client_kwargs = {
        'regional_client': True,
        'service_endpoint': kwargs.get('service_endpoint'),
        'base_path': '/20190801',
        'service_endpoint_template': 'https://osms.{region}.oci.{secondLevelDomain}',
        'skip_deserialization': kwargs.get('skip_deserialization', False),
        'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
    }
    if 'timeout' in kwargs:
        client_kwargs['timeout'] = kwargs.get('timeout')
    if client_kwargs.get('circuit_breaker_strategy') is None:
        # An explicit None means "fall back to the SDK default strategy",
        # not "run without a circuit breaker".
        client_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY

    self.base_client = BaseClient("os_management", config, signer, os_management_type_mapping, **client_kwargs)
    self.retry_strategy = kwargs.get('retry_strategy')
    self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
    def add_packages_to_software_source(self, software_source_id, add_packages_to_software_source_details, **kwargs):
        """Add the packages named in the details body to a software source.

        Calls ``POST /softwareSources/{softwareSourceId}/actions/addPackages``.

        :param str software_source_id: OCID of the software source (path parameter).
        :param add_packages_to_software_source_details: request body.
        :param kwargs: optional ``retry_strategy`` and ``opc_request_id``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/softwareSources/{softwareSourceId}/actions/addPackages"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "add_packages_to_software_source got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "softwareSourceId": software_source_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=add_packages_to_software_source_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=add_packages_to_software_source_details)
    def attach_child_software_source_to_managed_instance(self, managed_instance_id, attach_child_software_source_to_managed_instance_details, **kwargs):
        """Attach a child software source to a managed instance.

        Calls ``POST /managedInstances/{managedInstanceId}/actions/attachChildSoftwareSource``.

        :param str managed_instance_id: OCID of the managed instance (path parameter).
        :param attach_child_software_source_to_managed_instance_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstances/{managedInstanceId}/actions/attachChildSoftwareSource"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "attach_child_software_source_to_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=attach_child_software_source_to_managed_instance_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=attach_child_software_source_to_managed_instance_details)
    def attach_managed_instance_to_managed_instance_group(self, managed_instance_group_id, managed_instance_id, **kwargs):
        """Attach a managed instance to a managed instance group.

        Calls ``POST /managedInstanceGroups/{managedInstanceGroupId}/actions/attachManagedInstance``;
        the instance OCID is sent as the ``managedInstanceId`` query parameter.

        :param str managed_instance_group_id: OCID of the group (path parameter).
        :param str managed_instance_id: OCID of the instance (query parameter).
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/attachManagedInstance"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "attach_managed_instance_to_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceGroupId": managed_instance_group_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        query_params = {
            "managedInstanceId": managed_instance_id
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params)
    def attach_parent_software_source_to_managed_instance(self, managed_instance_id, attach_parent_software_source_to_managed_instance_details, **kwargs):
        """Attach a parent software source to a managed instance.

        Calls ``POST /managedInstances/{managedInstanceId}/actions/attachParentSoftwareSource``.

        :param str managed_instance_id: OCID of the managed instance (path parameter).
        :param attach_parent_software_source_to_managed_instance_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstances/{managedInstanceId}/actions/attachParentSoftwareSource"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "attach_parent_software_source_to_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=attach_parent_software_source_to_managed_instance_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=attach_parent_software_source_to_managed_instance_details)
    def change_managed_instance_group_compartment(self, managed_instance_group_id, change_managed_instance_group_compartment_details, **kwargs):
        """Move a managed instance group to another compartment.

        Calls ``POST /managedInstanceGroups/{managedInstanceGroupId}/actions/changeCompartment``.

        :param str managed_instance_group_id: OCID of the group (path parameter).
        :param change_managed_instance_group_compartment_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``,
            ``if_match`` (optimistic concurrency) and ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/changeCompartment"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "if_match",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "change_managed_instance_group_compartment got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceGroupId": managed_instance_group_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "if-match": kwargs.get("if_match", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_managed_instance_group_compartment_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_managed_instance_group_compartment_details)
    def change_scheduled_job_compartment(self, scheduled_job_id, change_scheduled_job_compartment_details, **kwargs):
        """Move a scheduled job to another compartment.

        Calls ``POST /scheduledJobs/{scheduledJobId}/actions/changeCompartment``.

        :param str scheduled_job_id: OCID of the scheduled job (path parameter).
        :param change_scheduled_job_compartment_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``,
            ``if_match`` and ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/scheduledJobs/{scheduledJobId}/actions/changeCompartment"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "if_match",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "change_scheduled_job_compartment got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "scheduledJobId": scheduled_job_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "if-match": kwargs.get("if_match", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_scheduled_job_compartment_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_scheduled_job_compartment_details)
    def change_software_source_compartment(self, software_source_id, change_software_source_compartment_details, **kwargs):
        """Move a software source to another compartment.

        Calls ``POST /softwareSources/{softwareSourceId}/actions/changeCompartment``.

        :param str software_source_id: OCID of the software source (path parameter).
        :param change_software_source_compartment_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``,
            ``if_match`` and ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/softwareSources/{softwareSourceId}/actions/changeCompartment"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "if_match",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "change_software_source_compartment got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "softwareSourceId": software_source_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "if-match": kwargs.get("if_match", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_software_source_compartment_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=change_software_source_compartment_details)
def create_managed_instance_group(self, create_managed_instance_group_details, **kwargs):
resource_path = "/managedInstanceGroups"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_managed_instance_group_details,
response_type="ManagedInstanceGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_managed_instance_group_details,
response_type="ManagedInstanceGroup")
    def create_scheduled_job(self, create_scheduled_job_details, **kwargs):
        """Create a scheduled job.

        Calls ``POST /scheduledJobs``.

        :param create_scheduled_job_details: request body describing the job.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: the service response deserialized as ``ScheduledJob``.
        :raises ValueError: on unknown kwargs.
        """
        resource_path = "/scheduledJobs"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "create_scheduled_job got unknown kwargs: {!r}".format(extra_kwargs))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                header_params=header_params,
                body=create_scheduled_job_details,
                response_type="ScheduledJob")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                header_params=header_params,
                body=create_scheduled_job_details,
                response_type="ScheduledJob")
    def create_software_source(self, create_software_source_details, **kwargs):
        """Create a software source.

        Calls ``POST /softwareSources``.

        :param create_software_source_details: request body describing the source.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: the service response deserialized as ``SoftwareSource``.
        :raises ValueError: on unknown kwargs.
        """
        resource_path = "/softwareSources"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "create_software_source got unknown kwargs: {!r}".format(extra_kwargs))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                header_params=header_params,
                body=create_software_source_details,
                response_type="SoftwareSource")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                header_params=header_params,
                body=create_software_source_details,
                response_type="SoftwareSource")
    def delete_managed_instance_group(self, managed_instance_group_id, **kwargs):
        """Delete a managed instance group.

        Calls ``DELETE /managedInstanceGroups/{managedInstanceGroupId}``.

        :param str managed_instance_group_id: OCID of the group (path parameter).
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``if_match``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstanceGroups/{managedInstanceGroupId}"
        method = "DELETE"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "if_match"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "delete_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceGroupId": managed_instance_group_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "if-match": kwargs.get("if_match", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
def delete_scheduled_job(self, scheduled_job_id, **kwargs):
resource_path = "/scheduledJobs/{scheduledJobId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_scheduled_job got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"scheduledJobId": scheduled_job_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
    def delete_software_source(self, software_source_id, **kwargs):
        """Delete a software source.

        Calls ``DELETE /softwareSources/{softwareSourceId}``.

        :param str software_source_id: OCID of the software source (path parameter).
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``if_match``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/softwareSources/{softwareSourceId}"
        method = "DELETE"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "if_match"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "delete_software_source got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "softwareSourceId": software_source_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "if-match": kwargs.get("if_match", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params)
    def detach_child_software_source_from_managed_instance(self, managed_instance_id, detach_child_software_source_from_managed_instance_details, **kwargs):
        """Detach a child software source from a managed instance.

        Calls ``POST /managedInstances/{managedInstanceId}/actions/detachChildSoftwareSource``.

        :param str managed_instance_id: OCID of the managed instance (path parameter).
        :param detach_child_software_source_from_managed_instance_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstances/{managedInstanceId}/actions/detachChildSoftwareSource"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "detach_child_software_source_from_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=detach_child_software_source_from_managed_instance_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=detach_child_software_source_from_managed_instance_details)
    def detach_managed_instance_from_managed_instance_group(self, managed_instance_group_id, managed_instance_id, **kwargs):
        """Detach a managed instance from a managed instance group.

        Calls ``POST /managedInstanceGroups/{managedInstanceGroupId}/actions/detachManagedInstance``;
        the instance OCID is sent as the ``managedInstanceId`` query parameter.

        :param str managed_instance_group_id: OCID of the group (path parameter).
        :param str managed_instance_id: OCID of the instance (query parameter).
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/detachManagedInstance"
        method = "POST"
        # Reject any kwargs this operation does not understand.
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "detach_managed_instance_from_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceGroupId": managed_instance_group_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        query_params = {
            "managedInstanceId": managed_instance_id
        }
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params)
    def detach_parent_software_source_from_managed_instance(self, managed_instance_id, detach_parent_software_source_from_managed_instance_details, **kwargs):
        """Detach a parent software source from a managed instance.

        Calls ``POST /managedInstances/{managedInstanceId}/actions/detachParentSoftwareSource``.

        :param str managed_instance_id: OCID of the managed instance (path parameter).
        :param detach_parent_software_source_from_managed_instance_details: request body.
        :param kwargs: optional ``retry_strategy``, ``opc_request_id``, ``opc_retry_token``.
        :return: whatever ``base_client.call_api`` returns for this request.
        :raises ValueError: on unknown kwargs or an empty/None path parameter.
        """
        resource_path = "/managedInstances/{managedInstanceId}/actions/detachParentSoftwareSource"
        method = "POST"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_request_id",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "detach_parent_software_source_from_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing),
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Drop headers the caller did not supply.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry_strategy wins over the client-level one.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_retry_token_if_needed(header_params)
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=detach_parent_software_source_from_managed_instance_details)
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=detach_parent_software_source_from_managed_instance_details)
def get_erratum(self, erratum_id, **kwargs):
resource_path = "/errata/{erratumId}"
method = "GET"
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_erratum got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"erratumId": erratum_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Erratum")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="Erratum")
def get_managed_instance(self, managed_instance_id, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagedInstance")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagedInstance")
def get_managed_instance_group(self, managed_instance_group_id, **kwargs):
resource_path = "/managedInstanceGroups/{managedInstanceGroupId}"
method = "GET"
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceGroupId": managed_instance_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagedInstanceGroup")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ManagedInstanceGroup")
def get_scheduled_job(self, scheduled_job_id, **kwargs):
resource_path = "/scheduledJobs/{scheduledJobId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_scheduled_job got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"scheduledJobId": scheduled_job_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScheduledJob")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="ScheduledJob")
def get_software_package(self, software_source_id, software_package_name, **kwargs):
resource_path = "/softwareSources/{softwareSourceId}/softwarePackages/{softwarePackageName}"
method = "GET"
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_software_package got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"softwareSourceId": software_source_id,
"softwarePackageName": software_package_name
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwarePackage")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwarePackage")
def get_software_source(self, software_source_id, **kwargs):
resource_path = "/softwareSources/{softwareSourceId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_software_source got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"softwareSourceId": software_source_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwareSource")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="SoftwareSource")
def get_windows_update(self, windows_update, **kwargs):
resource_path = "/updates/{windowsUpdate}"
method = "GET"
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_windows_update got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"windowsUpdate": windows_update
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WindowsUpdate")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WindowsUpdate")
def get_work_request(self, work_request_id, **kwargs):
resource_path = "/workRequests/{workRequestId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
def install_all_package_updates_on_managed_instance(self, managed_instance_id, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/updateAll"
method = "POST"
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_package_updates_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_all_updates_on_managed_instance_group(self, managed_instance_group_id, **kwargs):
resource_path = "/managedInstanceGroups/{managedInstanceGroupId}/actions/updates/installAll"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_updates_on_managed_instance_group got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceGroupId": managed_instance_group_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_all_windows_updates_on_managed_instance(self, managed_instance_id, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/actions/updates/installAll"
method = "POST"
expected_kwargs = [
"retry_strategy",
"update_type",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_all_windows_updates_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'update_type' in kwargs:
update_type_allowed_values = ["SECURITY", "BUGFIX", "ENHANCEMENT", "OTHER", "KSPLICE", "ALL"]
if kwargs['update_type'] not in update_type_allowed_values:
raise ValueError(
"Invalid value for `update_type`, must be one of {0}".format(update_type_allowed_values)
)
query_params = {
"updateType": kwargs.get("update_type", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_package_on_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/install"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_package_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"softwarePackageName": software_package_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_package_update_on_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/actions/packages/update"
method = "POST"
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_package_update_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"softwarePackageName": software_package_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def install_windows_update_on_managed_instance(self, managed_instance_id, windows_update_name, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/actions/updates/install"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"install_windows_update_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
query_params = {
"windowsUpdateName": windows_update_name
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params)
def list_available_packages_for_managed_instance(self, managed_instance_id, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/packages/available"
method = "GET"
expected_kwargs = [
"retry_strategy",
"display_name",
"compartment_id",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_available_packages_for_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"compartmentId": kwargs.get("compartment_id", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[InstallablePackageSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[InstallablePackageSummary]")
def list_available_software_sources_for_managed_instance(self, managed_instance_id, **kwargs):
resource_path = "/managedInstances/{managedInstanceId}/availableSoftwareSources"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"compartment_id",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_available_software_sources_for_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"managedInstanceId": managed_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"displayName": kwargs.get("display_name", missing),
"compartmentId": kwargs.get("compartment_id", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.base_client.get_preferred_retry_strategy(
operation_retry_strategy=kwargs.get('retry_strategy'),
client_retry_strategy=self.retry_strategy
)
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_client_retries_header(header_params)
retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[AvailableSoftwareSourceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[AvailableSoftwareSourceSummary]")
    def list_available_updates_for_managed_instance(self, managed_instance_id, **kwargs):
        """List the package updates available for a managed instance.

        Issues ``GET /managedInstances/{managedInstanceId}/packages/updates``
        and returns the response deserialized as
        ``list[AvailableUpdateSummary]``.

        :param str managed_instance_id: OCID of the managed instance
            (path parameter; must not be None, empty, or whitespace).
        :param kwargs: optional -- ``display_name``, ``compartment_id``,
            ``limit``, ``page``, ``sort_order`` (``ASC``/``DESC``),
            ``sort_by`` (``TIMECREATED``/``DISPLAYNAME``),
            ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg, a blank path parameter, or
            an out-of-range ``sort_order``/``sort_by`` value.
        """
        resource_path = "/managedInstances/{managedInstanceId}/packages/updates"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "compartment_id",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_available_updates_for_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        # Path params must be present and non-blank strings.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "displayName": kwargs.get("display_name", missing),
            "compartmentId": kwargs.get("compartment_id", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[AvailableUpdateSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[AvailableUpdateSummary]")
    def list_available_windows_updates_for_managed_instance(self, managed_instance_id, **kwargs):
        """List the Windows updates available for a managed instance.

        Issues ``GET /managedInstances/{managedInstanceId}/updates/available``
        and returns the response deserialized as
        ``list[AvailableWindowsUpdateSummary]``.

        :param str managed_instance_id: OCID of the managed instance
            (path parameter; must not be None, empty, or whitespace).
        :param kwargs: optional -- ``display_name``, ``compartment_id``,
            ``limit``, ``page``, ``sort_order`` (``ASC``/``DESC``),
            ``sort_by`` (``TIMECREATED``/``DISPLAYNAME``),
            ``opc_request_id``, ``is_eligible_for_installation``
            (``INSTALLABLE``/``NOT_INSTALLABLE``/``UNKNOWN``),
            ``retry_strategy``.
        :raises ValueError: on an unknown kwarg, a blank path parameter, or
            an out-of-range enum value.
        """
        resource_path = "/managedInstances/{managedInstanceId}/updates/available"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "compartment_id",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id",
            "is_eligible_for_installation"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_available_windows_updates_for_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        # Path params must be present and non-blank strings.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'is_eligible_for_installation' in kwargs:
            is_eligible_for_installation_allowed_values = ["INSTALLABLE", "NOT_INSTALLABLE", "UNKNOWN"]
            if kwargs['is_eligible_for_installation'] not in is_eligible_for_installation_allowed_values:
                raise ValueError(
                    "Invalid value for `is_eligible_for_installation`, must be one of {0}".format(is_eligible_for_installation_allowed_values)
                )
        query_params = {
            "displayName": kwargs.get("display_name", missing),
            "compartmentId": kwargs.get("compartment_id", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "isEligibleForInstallation": kwargs.get("is_eligible_for_installation", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[AvailableWindowsUpdateSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[AvailableWindowsUpdateSummary]")
    def list_errata(self, **kwargs):
        """List errata, optionally filtered by id, name, or issue-date range.

        Issues ``GET /errata`` and returns the response deserialized as
        ``list[ErratumSummary]``. No path parameters; every filter is an
        optional kwarg.

        :param kwargs: optional -- ``compartment_id``, ``erratum_id``,
            ``advisory_name``, ``time_issue_date_start``,
            ``time_issue_date_end``, ``limit``, ``page``, ``sort_order``
            (``ASC``/``DESC``), ``sort_by`` (``ISSUEDATE``/``ADVISORYNAME``),
            ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            ``sort_order``/``sort_by`` value.
        """
        resource_path = "/errata"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "compartment_id",
            "erratum_id",
            "advisory_name",
            "time_issue_date_start",
            "time_issue_date_end",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_errata got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["ISSUEDATE", "ADVISORYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "compartmentId": kwargs.get("compartment_id", missing),
            "erratumId": kwargs.get("erratum_id", missing),
            "advisoryName": kwargs.get("advisory_name", missing),
            "timeIssueDateStart": kwargs.get("time_issue_date_start", missing),
            "timeIssueDateEnd": kwargs.get("time_issue_date_end", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ErratumSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ErratumSummary]")
    def list_managed_instance_errata(self, managed_instance_id, **kwargs):
        """List the errata that apply to a managed instance.

        Issues ``GET /managedInstances/{managedInstanceId}/errata`` and
        returns the response deserialized as ``list[ErratumSummary]``.

        :param str managed_instance_id: OCID of the managed instance
            (path parameter; must not be None, empty, or whitespace).
        :param kwargs: optional -- ``display_name``, ``compartment_id``,
            ``limit``, ``page``, ``sort_order`` (``ASC``/``DESC``),
            ``sort_by`` (``TIMECREATED``/``DISPLAYNAME``),
            ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg, a blank path parameter, or
            an out-of-range ``sort_order``/``sort_by`` value.
        """
        resource_path = "/managedInstances/{managedInstanceId}/errata"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "compartment_id",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_managed_instance_errata got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        # Path params must be present and non-blank strings.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "displayName": kwargs.get("display_name", missing),
            "compartmentId": kwargs.get("compartment_id", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ErratumSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ErratumSummary]")
    def list_managed_instance_groups(self, compartment_id, **kwargs):
        """List managed instance groups in a compartment.

        Issues ``GET /managedInstanceGroups`` and returns the response
        deserialized as ``list[ManagedInstanceGroupSummary]``.

        :param str compartment_id: OCID of the compartment to list in
            (required query parameter).
        :param kwargs: optional -- ``display_name``, ``limit``, ``page``,
            ``sort_order`` (``ASC``/``DESC``), ``sort_by``
            (``TIMECREATED``/``DISPLAYNAME``), ``opc_request_id``,
            ``lifecycle_state`` (``CREATING``/``UPDATING``/``ACTIVE``/
            ``DELETING``/``DELETED``/``FAILED``), ``os_family``
            (``LINUX``/``WINDOWS``/``ALL``), ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            enum value.
        """
        resource_path = "/managedInstanceGroups"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id",
            "lifecycle_state",
            "os_family"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_managed_instance_groups got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )
        if 'os_family' in kwargs:
            os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
            if kwargs['os_family'] not in os_family_allowed_values:
                raise ValueError(
                    "Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "lifecycleState": kwargs.get("lifecycle_state", missing),
            "osFamily": kwargs.get("os_family", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ManagedInstanceGroupSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ManagedInstanceGroupSummary]")
    def list_managed_instances(self, compartment_id, **kwargs):
        """List managed instances in a compartment.

        Issues ``GET /managedInstances`` and returns the response
        deserialized as ``list[ManagedInstanceSummary]``.

        :param str compartment_id: OCID of the compartment to list in
            (required query parameter).
        :param kwargs: optional -- ``display_name``, ``limit``, ``page``,
            ``sort_order`` (``ASC``/``DESC``), ``sort_by``
            (``TIMECREATED``/``DISPLAYNAME``), ``opc_request_id``,
            ``os_family`` (``LINUX``/``WINDOWS``/``ALL``),
            ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            enum value.
        """
        resource_path = "/managedInstances"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id",
            "os_family"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_managed_instances got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'os_family' in kwargs:
            os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
            if kwargs['os_family'] not in os_family_allowed_values:
                raise ValueError(
                    "Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "osFamily": kwargs.get("os_family", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ManagedInstanceSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ManagedInstanceSummary]")
    def list_packages_installed_on_managed_instance(self, managed_instance_id, **kwargs):
        """List the packages installed on a managed instance.

        Issues ``GET /managedInstances/{managedInstanceId}/packages`` and
        returns the response deserialized as
        ``list[InstalledPackageSummary]``.

        :param str managed_instance_id: OCID of the managed instance
            (path parameter; must not be None, empty, or whitespace).
        :param kwargs: optional -- ``display_name``, ``compartment_id``,
            ``limit``, ``page``, ``sort_order`` (``ASC``/``DESC``),
            ``sort_by`` (``TIMECREATED``/``DISPLAYNAME``),
            ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg, a blank path parameter, or
            an out-of-range ``sort_order``/``sort_by`` value.
        """
        resource_path = "/managedInstances/{managedInstanceId}/packages"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "compartment_id",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_packages_installed_on_managed_instance got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "managedInstanceId": managed_instance_id
        }
        # Path params must be present and non-blank strings.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "displayName": kwargs.get("display_name", missing),
            "compartmentId": kwargs.get("compartment_id", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[InstalledPackageSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[InstalledPackageSummary]")
    def list_scheduled_jobs(self, compartment_id, **kwargs):
        """List scheduled jobs in a compartment.

        Issues ``GET /scheduledJobs`` and returns the response deserialized
        as ``list[ScheduledJobSummary]``.

        :param str compartment_id: OCID of the compartment to list in
            (required query parameter).
        :param kwargs: optional -- ``display_name``, ``managed_instance_id``,
            ``managed_instance_group_id``, ``operation_type``
            (``INSTALL``/``UPDATE``/``REMOVE``/``UPDATEALL``), ``limit``,
            ``page``, ``sort_order`` (``ASC``/``DESC``), ``sort_by``
            (``TIMECREATED``/``DISPLAYNAME``), ``lifecycle_state``
            (``CREATING``/``UPDATING``/``ACTIVE``/``DELETING``/``DELETED``/
            ``FAILED``), ``opc_request_id``, ``os_family``
            (``LINUX``/``WINDOWS``/``ALL``), ``is_restricted``,
            ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            enum value.
        """
        resource_path = "/scheduledJobs"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "managed_instance_id",
            "managed_instance_group_id",
            "operation_type",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "lifecycle_state",
            "opc_request_id",
            "os_family",
            "is_restricted"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_scheduled_jobs got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'operation_type' in kwargs:
            operation_type_allowed_values = ["INSTALL", "UPDATE", "REMOVE", "UPDATEALL"]
            if kwargs['operation_type'] not in operation_type_allowed_values:
                raise ValueError(
                    "Invalid value for `operation_type`, must be one of {0}".format(operation_type_allowed_values)
                )
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )
        if 'os_family' in kwargs:
            os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
            if kwargs['os_family'] not in os_family_allowed_values:
                raise ValueError(
                    "Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "managedInstanceId": kwargs.get("managed_instance_id", missing),
            "managedInstanceGroupId": kwargs.get("managed_instance_group_id", missing),
            "operationType": kwargs.get("operation_type", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "lifecycleState": kwargs.get("lifecycle_state", missing),
            "osFamily": kwargs.get("os_family", missing),
            "isRestricted": kwargs.get("is_restricted", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
    def list_software_source_packages(self, software_source_id, **kwargs):
        """List the software packages in a software source.

        Issues ``GET /softwareSources/{softwareSourceId}/softwarePackages``
        and returns the response deserialized as
        ``list[SoftwarePackageSummary]``.

        :param str software_source_id: OCID of the software source
            (path parameter; must not be None, empty, or whitespace).
        :param kwargs: optional -- ``compartment_id``, ``display_name``,
            ``limit``, ``page``, ``sort_order`` (``ASC``/``DESC``),
            ``sort_by`` (``TIMECREATED``/``DISPLAYNAME``),
            ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg, a blank path parameter, or
            an out-of-range ``sort_order``/``sort_by`` value.
        """
        resource_path = "/softwareSources/{softwareSourceId}/softwarePackages"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "compartment_id",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_software_source_packages got unknown kwargs: {!r}".format(extra_kwargs))
        path_params = {
            "softwareSourceId": software_source_id
        }
        # Path params must be present and non-blank strings.
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        query_params = {
            "compartmentId": kwargs.get("compartment_id", missing),
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwarePackageSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwarePackageSummary]")
    def list_software_sources(self, compartment_id, **kwargs):
        """List software sources in a compartment.

        Issues ``GET /softwareSources`` and returns the response
        deserialized as ``list[SoftwareSourceSummary]``.

        :param str compartment_id: OCID of the compartment to list in
            (required query parameter).
        :param kwargs: optional -- ``display_name``, ``limit``, ``page``,
            ``sort_order`` (``ASC``/``DESC``), ``sort_by``
            (``TIMECREATED``/``DISPLAYNAME``), ``lifecycle_state``
            (``CREATING``/``UPDATING``/``ACTIVE``/``DELETING``/``DELETED``/
            ``FAILED``), ``opc_request_id``, ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            enum value.
        """
        resource_path = "/softwareSources"
        method = "GET"
        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "lifecycle_state",
            "opc_request_id"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_software_sources got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "lifecycleState": kwargs.get("lifecycle_state", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwareSourceSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[SoftwareSourceSummary]")
    def list_upcoming_scheduled_jobs(self, compartment_id, time_end, **kwargs):
        """List scheduled jobs due to run before a cutoff time.

        Issues ``GET /scheduledJobs/upcomingSchedules`` and returns the
        response deserialized as ``list[ScheduledJobSummary]``.

        :param str compartment_id: OCID of the compartment to list in
            (required query parameter).
        :param time_end: cutoff sent as the ``timeEnd`` query parameter.
        :param kwargs: optional -- ``display_name``, ``limit``, ``page``,
            ``sort_order`` (``ASC``/``DESC``), ``sort_by``
            (``TIMECREATED``/``DISPLAYNAME``), ``tag_name``, ``tag_value``,
            ``lifecycle_state`` (``CREATING``/``UPDATING``/``ACTIVE``/
            ``DELETING``/``DELETED``/``FAILED``), ``opc_request_id``,
            ``os_family`` (``LINUX``/``WINDOWS``/``ALL``),
            ``retry_strategy``.
        :raises ValueError: on an unknown kwarg or an out-of-range
            enum value.
        """
        resource_path = "/scheduledJobs/upcomingSchedules"
        method = "GET"
        # Fail fast on kwargs this operation does not support.
        expected_kwargs = [
            "retry_strategy",
            "display_name",
            "limit",
            "page",
            "sort_order",
            "sort_by",
            "tag_name",
            "tag_value",
            "lifecycle_state",
            "opc_request_id",
            "os_family"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "list_upcoming_scheduled_jobs got unknown kwargs: {!r}".format(extra_kwargs))
        # Validate enum-style query arguments before building the request.
        if 'sort_order' in kwargs:
            sort_order_allowed_values = ["ASC", "DESC"]
            if kwargs['sort_order'] not in sort_order_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
                )
        if 'sort_by' in kwargs:
            sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
            if kwargs['sort_by'] not in sort_by_allowed_values:
                raise ValueError(
                    "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
                )
        if 'lifecycle_state' in kwargs:
            lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
            if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
                raise ValueError(
                    "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
                )
        if 'os_family' in kwargs:
            os_family_allowed_values = ["LINUX", "WINDOWS", "ALL"]
            if kwargs['os_family'] not in os_family_allowed_values:
                raise ValueError(
                    "Invalid value for `os_family`, must be one of {0}".format(os_family_allowed_values)
                )
        query_params = {
            "compartmentId": compartment_id,
            "displayName": kwargs.get("display_name", missing),
            "timeEnd": time_end,
            "limit": kwargs.get("limit", missing),
            "page": kwargs.get("page", missing),
            "sortOrder": kwargs.get("sort_order", missing),
            "sortBy": kwargs.get("sort_by", missing),
            "tagName": kwargs.get("tag_name", missing),
            "tagValue": kwargs.get("tag_value", missing),
            "lifecycleState": kwargs.get("lifecycle_state", missing),
            "osFamily": kwargs.get("os_family", missing)
        }
        # Drop unset (missing/None) entries so they are not sent on the wire.
        query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-request-id": kwargs.get("opc_request_id", missing)
        }
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
        # Operation-level retry strategy takes precedence over the client default.
        retry_strategy = self.base_client.get_preferred_retry_strategy(
            operation_retry_strategy=kwargs.get('retry_strategy'),
            client_retry_strategy=self.retry_strategy
        )
        if retry_strategy:
            # Advertise client-side retries and hook the circuit breaker,
            # unless the caller explicitly disabled retries (NoneRetryStrategy).
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                self.base_client.add_opc_client_retries_header(header_params)
                retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                query_params=query_params,
                header_params=header_params,
                response_type="list[ScheduledJobSummary]")
def list_windows_updates(self, **kwargs):
    """Issue ``GET /updates`` and return a ``list[WindowsUpdateSummary]`` response.

    Optional kwargs: ``retry_strategy``, ``compartment_id``, ``display_name``,
    ``limit``, ``page``, ``sort_order`` (ASC/DESC), ``sort_by``
    (TIMECREATED/DISPLAYNAME) and ``opc_request_id``.  Unknown kwargs and
    out-of-range enum values raise ``ValueError``.
    """
    resource_path = "/updates"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "compartment_id",
        "display_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_windows_updates got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "compartmentId": kwargs.get("compartment_id", missing),
        "displayName": kwargs.get("display_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    # Drop parameters the caller did not supply (sentinel or None).
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured: perform a single direct call.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WindowsUpdateSummary]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WindowsUpdateSummary]")
def list_windows_updates_installed_on_managed_instance(self, managed_instance_id, **kwargs):
    """Issue ``GET /managedInstances/{managedInstanceId}/updates/installed``.

    Returns a ``list[InstalledWindowsUpdateSummary]`` response.  Optional
    kwargs: ``retry_strategy``, ``display_name``, ``compartment_id``,
    ``limit``, ``page``, ``sort_order`` (ASC/DESC), ``sort_by``
    (TIMECREATED/DISPLAYNAME) and ``opc_request_id``.
    """
    resource_path = "/managedInstances/{managedInstanceId}/updates/installed"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "display_name",
        "compartment_id",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_windows_updates_installed_on_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "displayName": kwargs.get("display_name", missing),
        "compartmentId": kwargs.get("compartment_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            response_type="list[InstalledWindowsUpdateSummary]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[InstalledWindowsUpdateSummary]")
def list_work_request_errors(self, work_request_id, **kwargs):
    """Issue ``GET /workRequests/{workRequestId}/errors``.

    Returns a ``list[WorkRequestError]`` response.  Optional kwargs:
    ``retry_strategy``, ``limit``, ``page``, ``sort_order`` (ASC/DESC),
    ``sort_by`` (TIMECREATED/DISPLAYNAME) and ``opc_request_id``.
    """
    resource_path = "/workRequests/{workRequestId}/errors"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_work_request_errors got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestError]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WorkRequestError]")
def list_work_request_logs(self, work_request_id, **kwargs):
    """Issue ``GET /workRequests/{workRequestId}/logs``.

    Returns a ``list[WorkRequestLogEntry]`` response.  Optional kwargs:
    ``retry_strategy``, ``limit``, ``page``, ``sort_order`` (ASC/DESC),
    ``sort_by`` (TIMECREATED/DISPLAYNAME) and ``opc_request_id``.
    """
    resource_path = "/workRequests/{workRequestId}/logs"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_work_request_logs got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "workRequestId": work_request_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestLogEntry]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WorkRequestLogEntry]")
def list_work_requests(self, compartment_id, **kwargs):
    """Issue ``GET /workRequests`` and return a ``list[WorkRequestSummary]`` response.

    Required: ``compartment_id``.  Optional kwargs: ``retry_strategy``,
    ``display_name``, ``managed_instance_id``, ``limit``, ``page``,
    ``sort_order`` (ASC/DESC), ``sort_by`` (TIMECREATED/DISPLAYNAME),
    ``opc_request_id`` and ``os_family`` (LINUX/WINDOWS/ALL).
    """
    resource_path = "/workRequests"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "display_name",
        "managed_instance_id",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id",
        "os_family"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "list_work_requests got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"]),
                                      ('os_family', ["LINUX", "WINDOWS", "ALL"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "displayName": kwargs.get("display_name", missing),
        "managedInstanceId": kwargs.get("managed_instance_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "osFamily": kwargs.get("os_family", missing)
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[WorkRequestSummary]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[WorkRequestSummary]")
def remove_package_from_managed_instance(self, managed_instance_id, software_package_name, **kwargs):
    """Issue ``POST /managedInstances/{managedInstanceId}/actions/packages/remove``.

    Required: ``managed_instance_id`` (path) and ``software_package_name``
    (query).  Optional kwargs: ``retry_strategy``, ``opc_request_id`` and
    ``opc_retry_token``.  No typed response body is decoded.
    """
    resource_path = "/managedInstances/{managedInstanceId}/actions/packages/remove"
    method = "POST"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "remove_package_from_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    query_params = {
        "softwarePackageName": software_package_name
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            query_params=query_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Retried POSTs need an idempotency token so replays are safe.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params)
def remove_packages_from_software_source(self, software_source_id, remove_packages_from_software_source_details, **kwargs):
    """Issue ``POST /softwareSources/{softwareSourceId}/actions/removePackages``.

    Required: ``software_source_id`` (path) and the request body
    ``remove_packages_from_software_source_details``.  Optional kwargs:
    ``retry_strategy`` and ``opc_request_id``.
    """
    resource_path = "/softwareSources/{softwareSourceId}/actions/removePackages"
    method = "POST"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "remove_packages_from_software_source got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "softwareSourceId": software_source_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=remove_packages_from_software_source_details)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=remove_packages_from_software_source_details)
def run_scheduled_job_now(self, scheduled_job_id, **kwargs):
    """Issue ``POST /scheduledJobs/{scheduledJobId}/actions/runNow``.

    Required: ``scheduled_job_id`` (path).  Optional kwargs:
    ``retry_strategy``, ``opc_request_id``, ``if_match`` and
    ``opc_retry_token``.  No typed response body is decoded.
    """
    resource_path = "/scheduledJobs/{scheduledJobId}/actions/runNow"
    method = "POST"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "run_scheduled_job_now got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "scheduledJobId": scheduled_job_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Retried POSTs need an idempotency token so replays are safe.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def search_software_packages(self, **kwargs):
    """Issue ``GET /softwareSources/softwarePackages``.

    Returns a ``list[SoftwarePackageSearchSummary]`` response.  Optional
    kwargs: ``retry_strategy``, ``software_package_name``, ``display_name``,
    ``cve_name``, ``limit``, ``page``, ``sort_order`` (ASC/DESC),
    ``sort_by`` (TIMECREATED/DISPLAYNAME) and ``opc_request_id``.
    """
    resource_path = "/softwareSources/softwarePackages"
    method = "GET"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "software_package_name",
        "display_name",
        "cve_name",
        "limit",
        "page",
        "sort_order",
        "sort_by",
        "opc_request_id"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "search_software_packages got unknown kwargs: {!r}".format(unrecognized))

    # Validate enum-valued kwargs up front, in declaration order.
    for enum_name, allowed_values in (('sort_order', ["ASC", "DESC"]),
                                      ('sort_by', ["TIMECREATED", "DISPLAYNAME"])):
        if enum_name in kwargs and kwargs[enum_name] not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}`, must be one of {1}".format(enum_name, allowed_values)
            )

    query_params = {
        "softwarePackageName": kwargs.get("software_package_name", missing),
        "displayName": kwargs.get("display_name", missing),
        "cveName": kwargs.get("cve_name", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = dict(
        (name, value) for (name, value) in six.iteritems(query_params)
        if value is not missing and value is not None)

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            query_params=query_params,
            header_params=header_params,
            response_type="list[SoftwarePackageSearchSummary]")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="list[SoftwarePackageSearchSummary]")
def skip_next_scheduled_job_execution(self, scheduled_job_id, **kwargs):
    """Issue ``POST /scheduledJobs/{scheduledJobId}/actions/skipNextExecution``.

    Required: ``scheduled_job_id`` (path).  Optional kwargs:
    ``retry_strategy``, ``opc_request_id``, ``if_match`` and
    ``opc_retry_token``.  No typed response body is decoded.
    """
    resource_path = "/scheduledJobs/{scheduledJobId}/actions/skipNextExecution"
    method = "POST"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match",
        "opc_retry_token"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "skip_next_scheduled_job_execution got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "scheduledJobId": scheduled_job_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing),
        "opc-retry-token": kwargs.get("opc_retry_token", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Retried POSTs need an idempotency token so replays are safe.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params)
def update_managed_instance(self, managed_instance_id, update_managed_instance_details, **kwargs):
    """Issue ``PUT /managedInstances/{managedInstanceId}`` and return a ``ManagedInstance`` response.

    Required: ``managed_instance_id`` (path) and the request body
    ``update_managed_instance_details``.  Optional kwargs:
    ``retry_strategy``, ``opc_request_id`` and ``if_match``.
    """
    resource_path = "/managedInstances/{managedInstanceId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "update_managed_instance got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "managedInstanceId": managed_instance_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_managed_instance_details,
            response_type="ManagedInstance")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_managed_instance_details,
        response_type="ManagedInstance")
def update_managed_instance_group(self, managed_instance_group_id, update_managed_instance_group_details, **kwargs):
    """Issue ``PUT /managedInstanceGroups/{managedInstanceGroupId}`` and return a ``ManagedInstanceGroup`` response.

    Required: ``managed_instance_group_id`` (path) and the request body
    ``update_managed_instance_group_details``.  Optional kwargs:
    ``retry_strategy``, ``opc_request_id`` and ``if_match``.
    """
    resource_path = "/managedInstanceGroups/{managedInstanceGroupId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "update_managed_instance_group got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "managedInstanceGroupId": managed_instance_group_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_managed_instance_group_details,
            response_type="ManagedInstanceGroup")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_managed_instance_group_details,
        response_type="ManagedInstanceGroup")
def update_scheduled_job(self, scheduled_job_id, update_scheduled_job_details, **kwargs):
    """Issue ``PUT /scheduledJobs/{scheduledJobId}`` and return a ``ScheduledJob`` response.

    Required: ``scheduled_job_id`` (path) and the request body
    ``update_scheduled_job_details``.  Optional kwargs: ``retry_strategy``,
    ``opc_request_id`` and ``if_match``.
    """
    resource_path = "/scheduledJobs/{scheduledJobId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "update_scheduled_job got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "scheduledJobId": scheduled_job_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_scheduled_job_details,
            response_type="ScheduledJob")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_scheduled_job_details,
        response_type="ScheduledJob")
def update_software_source(self, software_source_id, update_software_source_details, **kwargs):
    """Issue ``PUT /softwareSources/{softwareSourceId}`` and return a ``SoftwareSource`` response.

    Required: ``software_source_id`` (path) and the request body
    ``update_software_source_details``.  Optional kwargs: ``retry_strategy``,
    ``opc_request_id`` and ``if_match``.
    """
    resource_path = "/softwareSources/{softwareSourceId}"
    method = "PUT"

    # Reject any keyword argument this operation does not understand.
    accepted_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    unrecognized = [name for name in six.iterkeys(kwargs) if name not in accepted_kwargs]
    if unrecognized:
        raise ValueError(
            "update_software_source got unknown kwargs: {!r}".format(unrecognized))

    path_params = {
        "softwareSourceId": software_source_id
    }
    path_params = dict(
        (name, value) for (name, value) in six.iteritems(path_params)
        if value is not missing)

    # Path segments must not be None, empty, or whitespace-only strings.
    for param_name, param_value in six.iteritems(path_params):
        is_blank = isinstance(param_value, six.string_types) and len(param_value.strip()) == 0
        if param_value is None or is_blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(param_name))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    header_params = dict(
        (name, value) for (name, value) in six.iteritems(header_params)
        if value is not missing and value is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=header_params,
            body=update_software_source_details,
            response_type="SoftwareSource")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_software_source_details,
        response_type="SoftwareSource")
| true | true |
1c32cbf02d8a965ded2df7baac755640a4f074e0 | 2,501 | py | Python | kornia/contrib/max_blur_pool.py | IEM-Computer-Vision/kornia | f98bd9a2158a6e59cda076d55d476acf13f4e0af | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/contrib/max_blur_pool.py | IEM-Computer-Vision/kornia | f98bd9a2158a6e59cda076d55d476acf13f4e0af | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/contrib/max_blur_pool.py | IEM-Computer-Vision/kornia | f98bd9a2158a6e59cda076d55d476acf13f4e0af | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.geometry.transform.pyramid import pyrdown
__all__ = [
"max_blur_pool2d",
"MaxBlurPool2d",
]
def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]:
"""Computes zero padding tuple."""
padding = [(k - 1) // 2 for k in kernel_size]
return padding[0], padding[1]
class MaxBlurPool2d(nn.Module):
    r"""Creates a module that computes pools and blurs and downsample a given
    feature map.

    A stride-1 max-pool is applied first (resolution preserved via "same"
    padding), then the result is blurred and downsampled by a factor of 2
    with :func:`pyrdown`, following :cite:`zhang2019shiftinvar`.

    Args:
        kernel_size (int): the kernel size for max pooling.
        ceil_mode (bool): should be true to match output size of conv2d with same kernel size.

    Shape:
        - Input: :math:`(B, C, H, W)`
        - Output: :math:`(B, C, H / 2, W / 2)`

    Returns:
        torch.Tensor: the transformed tensor.

    Examples:
        >>> input = torch.rand(1, 4, 4, 8)
        >>> pool = kornia.contrib.MaxBlurPool2d(kernel_size=3)
        >>> output = pool(input)  # 1x4x2x4
    """

    def __init__(self, kernel_size: int, ceil_mode: bool = False) -> None:
        super(MaxBlurPool2d, self).__init__()
        self.ceil_mode: bool = ceil_mode
        # Square pooling window of the requested size.
        self.kernel_size: Tuple[int, int] = (kernel_size, kernel_size)
        # "same" padding so the stride-1 max-pool keeps the spatial size.
        self.padding: Tuple[int, int] = _compute_zero_padding(self.kernel_size)

    def forward(self, input: torch.Tensor) -> torch.Tensor:  # type: ignore
        if not torch.is_tensor(input):
            raise TypeError("Input input type is not a torch.Tensor. Got {}"
                            .format(type(input)))
        if not len(input.shape) == 4:
            raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
                             .format(input.shape))
        # compute local maxima (stride 1: the spatial resolution is kept)
        x_max: torch.Tensor = F.max_pool2d(
            input, kernel_size=self.kernel_size,
            padding=self.padding, stride=1, ceil_mode=self.ceil_mode)
        # blur and downsample by a factor of 2
        x_down: torch.Tensor = pyrdown(x_max)
        return x_down
######################
# functional interface
######################
def max_blur_pool2d(input: torch.Tensor, kernel_size: int, ceil_mode: bool = False) -> torch.Tensor:
    r"""Functional interface for max-blur-pooling: pools, blurs and
    downsamples the given feature map.

    See :class:`~kornia.contrib.MaxBlurPool2d` for details.
    """
    pool = MaxBlurPool2d(kernel_size, ceil_mode)
    return pool(input)
| 31.658228 | 100 | 0.62615 | from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.geometry.transform.pyramid import pyrdown
__all__ = [
"max_blur_pool2d",
"MaxBlurPool2d",
]
def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]:
padding = [(k - 1) // 2 for k in kernel_size]
return padding[0], padding[1]
class MaxBlurPool2d(nn.Module):
def __init__(self, kernel_size: int, ceil_mode: bool = False) -> None:
super(MaxBlurPool2d, self).__init__()
self.ceil_mode: bool = ceil_mode
self.kernel_size: Tuple[int, int] = (kernel_size, kernel_size)
self.padding: Tuple[int, int] = _compute_zero_padding(self.kernel_size)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(input):
raise TypeError("Input input type is not a torch.Tensor. Got {}"
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
.format(input.shape))
x_max: torch.Tensor = F.max_pool2d(
input, kernel_size=self.kernel_size,
padding=self.padding, stride=1, ceil_mode=self.ceil_mode)
x_down: torch.Tensor = pyrdown(x_max)
return x_down
| true | true |
1c32cc54145a5ead761dc49d1c6ae9962191bca9 | 502 | py | Python | python/tuples.py | Shaimyst/toy-problems | 3ed20f7cba90504f0711f58c387d30db23be858b | [
"MIT"
] | null | null | null | python/tuples.py | Shaimyst/toy-problems | 3ed20f7cba90504f0711f58c387d30db23be858b | [
"MIT"
] | null | null | null | python/tuples.py | Shaimyst/toy-problems | 3ed20f7cba90504f0711f58c387d30db23be858b | [
"MIT"
] | null | null | null | # Task
# Given an integer, 'n', and 'n' space-separated integers as input, create a tuple, 't',
# of those integers. Then compute and print the result of hash(t).
# Sample Input 0
# 2
# 1 2
# Sample Output 0
# 3713081631934410656
if __name__ == '__main__':
    # Read the element count, then the n space-separated integers,
    # build a tuple from them and print its hash (HackerRank "Tuples").
    # BUGFIX: the original called Python 2's raw_input(), which does not
    # exist on Python 3; input() provides the same behaviour there.
    n = int(input())
    integer_list = map(int, input().split())
    my_tuple = tuple(integer_list)
    print(hash(my_tuple))
if __name__ == '__main__':
n = int(raw_input())
integer_list = map(int, raw_input().split())
my_tuple = tuple(integer_list)
print(hash(my_tuple)) | true | true |
1c32cdf46dce0ec761ecdb963bbacea91f05c4a1 | 1,395 | py | Python | python-opcua/timetable_parser.py | ssriblo/ionic-smarthome-test-1 | 060bc247e0b8295d6cd869d90b364756515cfc19 | [
"MIT"
] | 1 | 2020-12-18T15:18:19.000Z | 2020-12-18T15:18:19.000Z | python-opcua/timetable_parser.py | ssriblo/ionic-smarthome-test-1 | 060bc247e0b8295d6cd869d90b364756515cfc19 | [
"MIT"
] | 42 | 2020-08-20T04:01:12.000Z | 2021-01-09T18:50:21.000Z | python-opcua/timetable_parser.py | ssriblo/ionic-smarthome-test-1 | 060bc247e0b8295d6cd869d90b364756515cfc19 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import json
class DataParserTT(object):
    """Parses a heating time-table JSON document into the flat
    16-element value list used by the OPC-UA layer."""

    def __init__(self):
        pass

    def timetableParser(self, str):
        """Convert a time-table description to a flat list of 16 values.

        Parameters
        ----------
        str : str or dict
            JSON document (or an already-decoded dict) with the keys
            ``mode``, ``comf_0/1``, ``econ_0/1``, ``tt_vals``, ``tt_days``.

        Returns
        -------
        list
            ``[mode_flag, comf_0, comf_1, econ_0, econ_1, 0, 0,
            start0, end0, start1, end1, start2, end2,
            days0, days1, days2]`` where ``days*`` are 6 weekday flags
            packed little-endian into one byte. Slots stay ``None`` when
            the document is missing keys.
        """
        a = [None] * 16
        # BUGFIX: accept an already-decoded dict as well as a JSON string.
        # The original crashed json.loads() on dict inputs and relied on a
        # "magic" debug print; both the print and the crash are gone.
        ob = str if isinstance(str, dict) else json.loads(str)
        try:
            a[0] = 1 if ob['mode'] == "TimeTable" else 0
            a[1] = ob['comf_0']
            a[2] = ob['comf_1']
            a[3] = ob['econ_0']
            a[4] = ob['econ_1']
            a[5] = 0  # reserved
            a[6] = 0  # reserved
            # Three start/end interval pairs at slots 7..12.
            for i in range(3):
                a[7 + 2 * i] = ob['tt_vals'][i]['start']
                a[8 + 2 * i] = ob['tt_vals'][i]['end']
            # Pack the 6 weekday flags of each interval into one byte.
            for i in range(3):
                bits = [ob['tt_days'][i][j] for j in range(6)]
                a[i + 13] = int(np.packbits(bits, bitorder='little')[0])
        except (KeyError, IndexError, TypeError):
            # Best-effort contract kept from the original: log the error
            # and return the partially filled list instead of raising.
            # (Bare `except:` narrowed to the errors malformed documents
            # can actually produce.)
            e = sys.exc_info()
            print("EXCEPTION3: ", e[0], e[1])
        return a
| 31 | 123 | 0.424373 | import numpy as np
import sys
import json
class DataParserTT(object):
def __init__(self):
pass
def timetableParser(self, str):
a = [None]*16
print(f"\n\nstr type", type(str), str)
ob = json.loads(str)
try:
if (ob['mode'] == "TimeTable"):
a[0] = 1
else:
a[0] = 0
a[1] = ob['comf_0']
a[2] = ob['comf_1']
a[3] = ob['econ_0']
a[4] = ob['econ_1']
a[5] = 0
a[6] = 0
a[7] = ob['tt_vals'][0]['start']
a[8] = ob['tt_vals'][0]['end']
a[9] = ob['tt_vals'][1]['start']
a[10] = ob['tt_vals'][1]['end']
a[11] = ob['tt_vals'][2]['start']
a[12] = ob['tt_vals'][2]['end']
for i in range(0,3):
array = []
for j in range(0,6):
array.append(ob['tt_days'][i][j])
nda = np.packbits(array, bitorder='little')
a[i+13] = int(nda[0])
except:
e = sys.exc_info()
print( "EXCEPTION3: ", e[0], e[1])
return a
| true | true |
1c32cebad503494afaeb02a6817eeb5b18bd9cd8 | 12,464 | py | Python | src/gluonts/model/deepvar_hierarchical/_network.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 2,648 | 2019-06-03T17:18:27.000Z | 2022-03-31T08:29:22.000Z | src/gluonts/model/deepvar_hierarchical/_network.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 1,220 | 2019-06-04T09:00:14.000Z | 2022-03-31T10:45:43.000Z | src/gluonts/model/deepvar_hierarchical/_network.py | Xiaoxiong-Liu/gluon-ts | 097c492769258dd70b7f223f826b17b0051ceee9 | [
"Apache-2.0"
] | 595 | 2019-06-04T01:04:31.000Z | 2022-03-30T10:40:26.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import warnings
from itertools import product
from typing import List, Optional

# Third-party imports
import mxnet as mx
import numpy as np

# First-party imports
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.distribution import Distribution, DistributionOutput
from gluonts.mx.distribution import EmpiricalDistribution
from gluonts.mx.util import assert_shape
from gluonts.mx.distribution import LowrankMultivariateGaussian
from gluonts.model.deepvar._network import (
    DeepVARNetwork,
    DeepVARTrainingNetwork,
    DeepVARPredictionNetwork,
)
def reconcile_samples(
    reconciliation_mat: Tensor,
    samples: Tensor,
    seq_axis: Optional[List] = None,
) -> Tensor:
    """
    Computes coherent samples by multiplying unconstrained `samples` with
    `reconciliation_mat`.

    Parameters
    ----------
    reconciliation_mat
        Shape: (target_dim, target_dim)
    samples
        Unconstrained samples
        Shape: `(*batch_shape, target_dim)`
        During training: (num_samples, batch_size, seq_len, target_dim)
        During prediction: (num_parallel_samples x batch_size, seq_len, target_dim)
    seq_axis
        Specifies the list of axes that should be reconciled sequentially.
        By default, all axes are processed in parallel.

    Returns
    -------
    Tensor, shape same as that of `samples`
        Coherent samples
    """
    if not seq_axis:
        # Fast path: one batched matrix product reconciles all samples.
        return mx.nd.dot(samples, reconciliation_mat, transpose_b=True)
    else:
        num_dims = len(samples.shape)
        # The last axis is `target_dim`, which the matrix product consumes,
        # so it can never be one of the iterated axes.
        last_dim_in_seq_axis = num_dims - 1 in seq_axis or -1 in seq_axis
        assert (
            not last_dim_in_seq_axis
        ), f"The last dimension cannot be processed iteratively. Remove axis {num_dims - 1} (or -1) from `seq_axis`."

        # In this case, reconcile samples by going over each index in
        # `seq_axis` iteratively. Note that `seq_axis` can be more than one
        # dimension.
        num_seq_axes = len(seq_axis)

        # bring the axes to iterate to the beginning
        samples = mx.nd.moveaxis(samples, seq_axis, list(range(num_seq_axes)))
        seq_axes_sizes = samples.shape[:num_seq_axes]
        out = [
            mx.nd.dot(samples[idx], reconciliation_mat, transpose_b=True)
            # get the sequential index from the cross-product of their sizes.
            for idx in product(*[range(size) for size in seq_axes_sizes])
        ]
        # put the axes back in the original order again
        out = mx.nd.concat(*out, dim=0).reshape(samples.shape)
        out = mx.nd.moveaxis(out, list(range(len(seq_axis))), seq_axis)
        return out
def reconciliation_error(A: Tensor, samples: Tensor) -> float:
    r"""Return the maximum relative reconciliation error over all
    aggregated time series.

    .. math::

        \max_i \frac{|y_i - s_i|} {|y_i|},

    where :math:`y_i` is the direct forecast of the :math:`i`-th
    aggregated series and :math:`s_i` its bottom-up aggregate; when
    :math:`y_i` is zero the absolute difference :math:`|s_i|` is used
    instead. Given the constraint matrix ``A`` (rows encode
    :math:`Ay = 0`), this equals :math:`\max |A \times samples|` divided
    elementwise by the first ``A.shape[0]`` entries of ``samples``.

    Parameters
    ----------
    A
        Constraint matrix of the hierarchy.
    samples
        Samples. Shape: `(*batch_shape, target_dim)`.

    Returns
    -------
    Float
        Reconciliation error
    """
    n_agg = A.shape[0]
    # Direct forecasts of the aggregated series occupy the leading entries.
    direct = samples.slice_axis(axis=-1, begin=0, end=n_agg).asnumpy()
    # |A . samples| measures how far each sample is from coherence.
    residual = mx.nd.abs(mx.nd.dot(samples, A, transpose_b=True)).asnumpy()
    relative = np.where(direct == 0, residual, residual / np.abs(direct))
    return np.max(relative)
class DeepVARHierarchicalNetwork(DeepVARNetwork):
    """DeepVAR network for hierarchical time series.

    Extends :class:`DeepVARNetwork` with a reconciliation matrix ``M``
    (used to project samples onto the coherent subspace) and a constraint
    matrix ``A`` (rows encode ``Ay = 0`` for coherent ``y``).
    """

    @validated()
    def __init__(
        self,
        M,
        A,
        num_layers: int,
        num_cells: int,
        cell_type: str,
        history_length: int,
        context_length: int,
        prediction_length: int,
        distr_output: DistributionOutput,
        dropout_rate: float,
        lags_seq: List[int],
        target_dim: int,
        cardinality: List[int] = [1],
        embedding_dimension: int = 1,
        scaling: bool = True,
        seq_axis: List[int] = None,
        **kwargs,
    ) -> None:
        super().__init__(
            num_layers=num_layers,
            num_cells=num_cells,
            cell_type=cell_type,
            history_length=history_length,
            context_length=context_length,
            prediction_length=prediction_length,
            distr_output=distr_output,
            dropout_rate=dropout_rate,
            lags_seq=lags_seq,
            target_dim=target_dim,
            cardinality=cardinality,
            embedding_dimension=embedding_dimension,
            scaling=scaling,
            **kwargs,
        )
        # M: reconciliation (projection) matrix, see `reconcile_samples`.
        self.M = M
        # A: aggregation constraint matrix, see `reconciliation_error`.
        self.A = A
        # Axes to reconcile sequentially (None -> all in parallel).
        self.seq_axis = seq_axis

    def get_samples_for_loss(self, distr: Distribution) -> Tensor:
        """
        Get samples to compute the final loss. These are samples directly
        drawn from the given `distr` if coherence is not enforced yet;
        otherwise the drawn samples are reconciled.

        Parameters
        ----------
        distr
            Distribution instances

        Returns
        -------
        samples
            Tensor with shape (num_samples, batch_size, seq_len, target_dim)
        """
        samples = distr.sample_rep(
            num_samples=self.num_samples_for_loss, dtype="float32"
        )

        # Determine which epoch we are currently in.
        self.batch_no += 1
        epoch_no = self.batch_no // self.num_batches_per_epoch + 1
        epoch_frac = epoch_no / self.epochs

        # Reconcile only after the warm-start fraction of training has
        # passed (and only if coherent training samples were requested).
        if (
            self.coherent_train_samples
            and epoch_frac > self.warmstart_epoch_frac
        ):
            coherent_samples = reconcile_samples(
                reconciliation_mat=self.M,
                samples=samples,
                seq_axis=self.seq_axis,
            )
            assert_shape(coherent_samples, samples.shape)
            return coherent_samples
        else:
            return samples

    def loss(self, F, target: Tensor, distr: Distribution) -> Tensor:
        """
        Computes loss given the output of the network in the form of
        distribution. The loss is given by:

            `self.CRPS_weight` * `loss_CRPS` + `self.likelihood_weight` * `neg_likelihoods`,

        where

        * `loss_CRPS` is computed on the samples drawn from the predicted
          `distr` (optionally after reconciling them),
        * `neg_likelihoods` are either computed directly using the
          predicted `distr` or from the estimated distribution based on
          (coherent) samples, depending on the `sample_LH` flag.

        Parameters
        ----------
        F
        target
            Tensor with shape (batch_size, seq_len, target_dim)
        distr
            Distribution instances

        Returns
        -------
        Loss
            Tensor with shape (batch_size, seq_length, 1)
        """
        # Sample from the predicted distribution if we are computing CRPS
        # loss or likelihood using the distribution based on (coherent)
        # samples. Samples shape: (num_samples, batch_size, seq_len, target_dim)
        if self.sample_LH or (self.CRPS_weight > 0.0):
            samples = self.get_samples_for_loss(distr=distr)

        if self.sample_LH:
            # Estimate the distribution based on (coherent) samples.
            distr = LowrankMultivariateGaussian.fit(F, samples=samples, rank=0)

        neg_likelihoods = -distr.log_prob(target).expand_dims(axis=-1)

        loss_CRPS = F.zeros_like(neg_likelihoods)
        if self.CRPS_weight > 0.0:
            loss_CRPS = (
                EmpiricalDistribution(samples=samples, event_dim=1)
                .crps_univariate(x=target)
                .expand_dims(axis=-1)
            )

        return (
            self.CRPS_weight * loss_CRPS
            + self.likelihood_weight * neg_likelihoods
        )

    def post_process_samples(self, samples: Tensor) -> Tensor:
        """
        Reconcile samples if `coherent_pred_samples` is True.

        Parameters
        ----------
        samples
            Tensor of shape (num_parallel_samples*batch_size, 1, target_dim)

        Returns
        -------
        Tensor of coherent samples.
        """
        if not self.coherent_pred_samples:
            return samples
        else:
            coherent_samples = reconcile_samples(
                reconciliation_mat=self.M,
                samples=samples,
                seq_axis=self.seq_axis,
            )
            assert_shape(coherent_samples, samples.shape)

            # assert that A*X_proj ~ 0, i.e. reconciliation succeeded
            # within tolerance
            if self.assert_reconciliation:
                assert (
                    reconciliation_error(self.A, samples=coherent_samples)
                    < self.reconciliation_tol
                )

            return coherent_samples
class DeepVARHierarchicalTrainingNetwork(
    DeepVARHierarchicalNetwork, DeepVARTrainingNetwork
):
    """Training-time network.

    Adds loss configuration (CRPS vs. likelihood weighting, coherent
    training samples, warm-start schedule) on top of
    :class:`DeepVARHierarchicalNetwork`.
    """

    def __init__(
        self,
        num_samples_for_loss: int,
        likelihood_weight: float,
        CRPS_weight: float,
        coherent_train_samples: bool,
        warmstart_epoch_frac: float,
        epochs: float,
        num_batches_per_epoch: float,
        sample_LH: bool,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_samples_for_loss = num_samples_for_loss
        self.likelihood_weight = likelihood_weight
        self.CRPS_weight = CRPS_weight
        self.coherent_train_samples = coherent_train_samples
        self.warmstart_epoch_frac = warmstart_epoch_frac
        self.epochs = epochs
        self.num_batches_per_epoch = num_batches_per_epoch
        self.batch_no = 0  # incremented in `get_samples_for_loss`
        self.sample_LH = sample_LH

        # Assert CRPS_weight, likelihood_weight, and coherent_train_samples
        # have harmonious values.
        assert self.CRPS_weight >= 0.0, "CRPS weight must be non-negative"
        assert (
            self.likelihood_weight >= 0.0
        ), "Likelihood weight must be non-negative!"
        assert (
            self.likelihood_weight + self.CRPS_weight > 0.0
        ), "At least one of CRPS or likelihood weights must be non-zero"

        # BUGFIX: the original used `assert "<message>"`, which is a no-op
        # because a non-empty string literal is always truthy, so none of
        # these checks ever fired. Emit real warnings instead; the
        # conditions are kept exactly as originally written.
        if self.CRPS_weight == 0.0 and self.coherent_train_samples:
            warnings.warn(
                "No sampling being performed. "
                "coherent_train_samples flag is ignored"
            )
        # NOTE(review): this condition fires when sample_LH is truthy;
        # confirm the original intent was not `not self.sample_LH`.
        if not self.sample_LH == 0.0 and self.coherent_train_samples:
            warnings.warn(
                "No sampling being performed. "
                "coherent_train_samples flag is ignored"
            )
        if self.likelihood_weight == 0.0 and self.sample_LH:
            warnings.warn(
                "likelihood_weight is 0 but sample likelihoods are still "
                "being calculated. Set sample_LH=0 when likelihood_weight=0"
            )
class DeepVARHierarchicalPredictionNetwork(
    DeepVARHierarchicalNetwork, DeepVARPredictionNetwork
):
    """Prediction-time network: draws forecast sample paths and, when
    configured, projects them onto the coherent subspace."""

    @validated()
    def __init__(
        self,
        num_parallel_samples: int,
        assert_reconciliation: bool,
        coherent_pred_samples: bool,
        reconciliation_tol: float,
        **kwargs,
    ) -> None:
        super().__init__(num_parallel_samples=num_parallel_samples, **kwargs)
        # Check `reconciliation_error` stays below tolerance after
        # projection (see `post_process_samples`).
        self.assert_reconciliation = assert_reconciliation
        self.reconciliation_tol = reconciliation_tol
        # Whether sampled forecasts are reconciled at all.
        self.coherent_pred_samples = coherent_pred_samples
| 33.326203 | 120 | 0.634467 |
from typing import List, Optional
from itertools import product
import mxnet as mx
import numpy as np
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.distribution import Distribution, DistributionOutput
from gluonts.mx.distribution import EmpiricalDistribution
from gluonts.mx.util import assert_shape
from gluonts.mx.distribution import LowrankMultivariateGaussian
from gluonts.model.deepvar._network import (
DeepVARNetwork,
DeepVARTrainingNetwork,
DeepVARPredictionNetwork,
)
def reconcile_samples(
reconciliation_mat: Tensor,
samples: Tensor,
seq_axis: Optional[List] = None,
) -> Tensor:
if not seq_axis:
return mx.nd.dot(samples, reconciliation_mat, transpose_b=True)
else:
num_dims = len(samples.shape)
last_dim_in_seq_axis = num_dims - 1 in seq_axis or -1 in seq_axis
assert (
not last_dim_in_seq_axis
), f"The last dimension cannot be processed iteratively. Remove axis {num_dims - 1} (or -1) from `seq_axis`."
num_seq_axes = len(seq_axis)
samples = mx.nd.moveaxis(samples, seq_axis, list(range(num_seq_axes)))
seq_axes_sizes = samples.shape[:num_seq_axes]
out = [
mx.nd.dot(samples[idx], reconciliation_mat, transpose_b=True)
for idx in product(*[range(size) for size in seq_axes_sizes])
]
out = mx.nd.concat(*out, dim=0).reshape(samples.shape)
out = mx.nd.moveaxis(out, list(range(len(seq_axis))), seq_axis)
return out
def reconciliation_error(A: Tensor, samples: Tensor) -> float:
num_agg_ts = A.shape[0]
forecasts_agg_ts = samples.slice_axis(
axis=-1, begin=0, end=num_agg_ts
).asnumpy()
abs_err = mx.nd.abs(mx.nd.dot(samples, A, transpose_b=True)).asnumpy()
rel_err = np.where(
forecasts_agg_ts == 0,
abs_err,
abs_err / np.abs(forecasts_agg_ts),
)
return np.max(rel_err)
class DeepVARHierarchicalNetwork(DeepVARNetwork):
@validated()
def __init__(
self,
M,
A,
num_layers: int,
num_cells: int,
cell_type: str,
history_length: int,
context_length: int,
prediction_length: int,
distr_output: DistributionOutput,
dropout_rate: float,
lags_seq: List[int],
target_dim: int,
cardinality: List[int] = [1],
embedding_dimension: int = 1,
scaling: bool = True,
seq_axis: List[int] = None,
**kwargs,
) -> None:
super().__init__(
num_layers=num_layers,
num_cells=num_cells,
cell_type=cell_type,
history_length=history_length,
context_length=context_length,
prediction_length=prediction_length,
distr_output=distr_output,
dropout_rate=dropout_rate,
lags_seq=lags_seq,
target_dim=target_dim,
cardinality=cardinality,
embedding_dimension=embedding_dimension,
scaling=scaling,
**kwargs,
)
self.M = M
self.A = A
self.seq_axis = seq_axis
def get_samples_for_loss(self, distr: Distribution) -> Tensor:
samples = distr.sample_rep(
num_samples=self.num_samples_for_loss, dtype="float32"
)
self.batch_no += 1
epoch_no = self.batch_no // self.num_batches_per_epoch + 1
epoch_frac = epoch_no / self.epochs
if (
self.coherent_train_samples
and epoch_frac > self.warmstart_epoch_frac
):
coherent_samples = reconcile_samples(
reconciliation_mat=self.M,
samples=samples,
seq_axis=self.seq_axis,
)
assert_shape(coherent_samples, samples.shape)
return coherent_samples
else:
return samples
def loss(self, F, target: Tensor, distr: Distribution) -> Tensor:
if self.sample_LH or (self.CRPS_weight > 0.0):
samples = self.get_samples_for_loss(distr=distr)
if self.sample_LH:
distr = LowrankMultivariateGaussian.fit(F, samples=samples, rank=0)
neg_likelihoods = -distr.log_prob(target).expand_dims(axis=-1)
loss_CRPS = F.zeros_like(neg_likelihoods)
if self.CRPS_weight > 0.0:
loss_CRPS = (
EmpiricalDistribution(samples=samples, event_dim=1)
.crps_univariate(x=target)
.expand_dims(axis=-1)
)
return (
self.CRPS_weight * loss_CRPS
+ self.likelihood_weight * neg_likelihoods
)
def post_process_samples(self, samples: Tensor) -> Tensor:
if not self.coherent_pred_samples:
return samples
else:
coherent_samples = reconcile_samples(
reconciliation_mat=self.M,
samples=samples,
seq_axis=self.seq_axis,
)
assert_shape(coherent_samples, samples.shape)
if self.assert_reconciliation:
assert (
reconciliation_error(self.A, samples=coherent_samples)
< self.reconciliation_tol
)
return coherent_samples
class DeepVARHierarchicalTrainingNetwork(
DeepVARHierarchicalNetwork, DeepVARTrainingNetwork
):
def __init__(
self,
num_samples_for_loss: int,
likelihood_weight: float,
CRPS_weight: float,
coherent_train_samples: bool,
warmstart_epoch_frac: float,
epochs: float,
num_batches_per_epoch: float,
sample_LH: bool,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.num_samples_for_loss = num_samples_for_loss
self.likelihood_weight = likelihood_weight
self.CRPS_weight = CRPS_weight
self.coherent_train_samples = coherent_train_samples
self.warmstart_epoch_frac = warmstart_epoch_frac
self.epochs = epochs
self.num_batches_per_epoch = num_batches_per_epoch
self.batch_no = 0
self.sample_LH = sample_LH
assert self.CRPS_weight >= 0.0, "CRPS weight must be non-negative"
assert (
self.likelihood_weight >= 0.0
), "Likelihood weight must be non-negative!"
assert (
self.likelihood_weight + self.CRPS_weight > 0.0
), "At least one of CRPS or likelihood weights must be non-zero"
if self.CRPS_weight == 0.0 and self.coherent_train_samples:
assert "No sampling being performed. coherent_train_samples flag is ignored"
if not self.sample_LH == 0.0 and self.coherent_train_samples:
assert "No sampling being performed. coherent_train_samples flag is ignored"
if self.likelihood_weight == 0.0 and self.sample_LH:
assert (
"likelihood_weight is 0 but sample likelihoods are still being calculated. "
"Set sample_LH=0 when likelihood_weight=0"
)
class DeepVARHierarchicalPredictionNetwork(
DeepVARHierarchicalNetwork, DeepVARPredictionNetwork
):
@validated()
def __init__(
self,
num_parallel_samples: int,
assert_reconciliation: bool,
coherent_pred_samples: bool,
reconciliation_tol: float,
**kwargs,
) -> None:
super().__init__(num_parallel_samples=num_parallel_samples, **kwargs)
self.coherent_pred_samples = coherent_pred_samples
self.assert_reconciliation = assert_reconciliation
self.reconciliation_tol = reconciliation_tol
| true | true |
1c32ced5986b513b59820c9d7855cb384387ef16 | 6,208 | py | Python | simpleai/search/models.py | kod3r/simpleai | 989f7ae3e1d770b34aa41611243635fd6109aff8 | [
"MIT"
] | 1 | 2019-04-19T23:05:28.000Z | 2019-04-19T23:05:28.000Z | simpleai/search/models.py | kod3r/simpleai | 989f7ae3e1d770b34aa41611243635fd6109aff8 | [
"MIT"
] | null | null | null | simpleai/search/models.py | kod3r/simpleai | 989f7ae3e1d770b34aa41611243635fd6109aff8 | [
"MIT"
] | null | null | null | # coding=utf-8
class SearchProblem(object):
    '''Abstract base class to represent and manipulate the search space of a
    problem.

    In this class, the search space is meant to be represented implicitly as
    a graph.
    Each state corresponds with a problem state (ie, a valid configuration)
    and each problem action (ie, a valid transformation to a configuration)
    corresponds with an edge.

    To use this class with a problem seen as a graph search you should at
    least implement: `actions`, `result` and `is_goal`.
    Optionally, it might be useful to also implement `cost`.

    To use this class with a problem seen as an optimization over a target
    function you should at least implement: `actions`, `result` and `value`.
    '''

    def __init__(self, initial_state):
        # Starting state of the search; stored as-is, its type is
        # problem-specific.
        self.initial_state = initial_state

    def actions(self, state):
        '''Returns the actions available to perform from `state`.
        The returned value is an iterable over actions.
        Actions are problem-specific and no assumption should be made about
        them.
        '''
        raise NotImplementedError

    def result(self, state, action):
        '''Returns the resulting state of applying `action` to `state`.'''
        raise NotImplementedError

    def cost(self, state, action, state2):
        '''Returns the cost of applying `action` from `state` to `state2`.
        The returned value is a number (integer or floating point).
        By default this function returns `1`.
        '''
        return 1

    def is_goal(self, state):
        '''Returns `True` if `state` is a goal state and `False` otherwise'''
        raise NotImplementedError

    def value(self, state):
        '''Returns the value of `state` as it is needed by optimization
        problems.
        Value is a number (integer or floating point).'''
        raise NotImplementedError

    def heuristic(self, state):
        '''Returns an estimate of the cost remaining to reach the solution
        from `state`.
        By default returns the trivial estimate 0.'''
        return 0

    def crossover(self, state1, state2):
        """
        Crossover method for genetic search. It should return a new state
        that is the 'mix' (somehow) of `state1` and `state2`.
        """
        raise NotImplementedError

    def mutate(self, state):
        """
        Mutation method for genetic search. It should return a new state
        that is a slight random variation of `state`.
        """
        raise NotImplementedError

    def generate_random_state(self):
        """
        Generates a random state for genetic search. It's mainly used for
        the seed states in the initialization of genetic search.
        """
        raise NotImplementedError

    def state_representation(self, state):
        """
        Returns a string representation of a state.
        By default it returns repr(state).
        """
        return repr(state)
class SearchNode(object):
    '''Node of a search process.

    Wraps a problem state together with the parent node, the action that
    produced it, the accumulated path cost and the depth in the tree.
    '''

    def __init__(self, state, parent=None, action=None, cost=0, problem=None,
                 depth=0):
        self.state = state
        self.parent = parent
        self.action = action
        self.cost = cost
        # Either an explicit problem or the parent's (root nodes must
        # receive one explicitly).
        self.problem = problem or parent.problem
        self.depth = depth

    def expand(self, local_search=False):
        '''Create successors.

        When `local_search` is True the children carry no parent link, so
        discarded paths can be garbage-collected.
        '''
        new_nodes = []
        for action in self.problem.actions(self.state):
            new_state = self.problem.result(self.state, action)
            cost = self.problem.cost(self.state,
                                     action,
                                     new_state)
            # Instantiate the same subclass so ordering semantics survive.
            nodefactory = self.__class__
            new_nodes.append(nodefactory(state=new_state,
                                         parent=None if local_search else self,
                                         problem=self.problem,
                                         action=action,
                                         cost=self.cost + cost,
                                         depth=self.depth + 1))
        return new_nodes

    def path(self):
        '''Path (list of (action, state) pairs) from root to this node.'''
        node = self
        path = []
        while node:
            path.append((node.action, node.state))
            node = node.parent
        return list(reversed(path))

    def __eq__(self, other):
        return isinstance(other, SearchNode) and self.state == other.state

    def __hash__(self):
        # BUGFIX: defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 (so they cannot live in sets/dicts).
        # Hash on the same key equality uses.
        return hash(self.state)

    def __repr__(self):
        return 'Node <%s>' % self.problem.state_representation(self.state)
class SearchNodeCostOrdered(SearchNode):
    # Orders nodes by accumulated path cost, lowest first
    # (uniform-cost-style ordering for heaps/sorting).
    def __lt__(self, other):
        return self.cost < other.cost
class SearchNodeValueOrdered(SearchNode):
    # Orders nodes by the problem's value function, highest first.
    def __init__(self, *args, **kwargs):
        super(SearchNodeValueOrdered, self).__init__(*args, **kwargs)
        # Cache the value so comparisons don't re-evaluate the problem.
        self.value = self.problem.value(self.state)

    def __lt__(self, other):
        # value must work inverted, because heapq sorts 1-9
        # and we need 9-1 sorting
        return -self.value < -other.value
class SearchNodeHeuristicOrdered(SearchNode):
    # Orders nodes by heuristic estimate, best (lowest) first.
    def __init__(self, *args, **kwargs):
        super(SearchNodeHeuristicOrdered, self).__init__(*args, **kwargs)
        # Cache the heuristic so comparisons don't recompute it.
        self.heuristic = self.problem.heuristic(self.state)

    def __lt__(self, other):
        return self.heuristic < other.heuristic
class SearchNodeStarOrdered(SearchNodeHeuristicOrdered):
    # Orders nodes by f = h + g (heuristic estimate plus accumulated
    # cost), the A* ordering.
    def __lt__(self, other):
        return self.heuristic + self.cost < other.heuristic + other.cost
class CspProblem(object):
    """Constraint satisfaction problem.

    `constraints` is a list of ``(variables_tuple, constraint_fn)`` pairs;
    a constraint mentions a variable when the variable appears in its
    ``variables_tuple`` (``constraint[0]``).
    """

    def __init__(self, variables, domains, constraints):
        self.variables = variables
        self.domains = domains
        self.constraints = constraints

        # variable-based constraints dict: for each variable, the
        # constraints that mention it. (dict comprehensions replace the
        # old dict([...list comp...]) construction; the `var_contraints`
        # spelling is kept for backward compatibility.)
        self.var_contraints = {
            v: [constraint for constraint in constraints
                if v in constraint[0]]
            for v in variables
        }

        # degree of each variable = number of constraints it appears in
        self.var_degrees = {
            v: len(self.var_contraints[v]) for v in variables
        }
| 35.073446 | 79 | 0.598099 |
class SearchProblem(object):
def __init__(self, initial_state):
self.initial_state = initial_state
def actions(self, state):
raise NotImplementedError
def result(self, state, action):
raise NotImplementedError
def cost(self, state, action, state2):
return 1
def is_goal(self, state):
raise NotImplementedError
def value(self, state):
raise NotImplementedError
def heuristic(self, state):
return 0
def crossover(self, state1, state2):
raise NotImplementedError
def mutate(self, state):
raise NotImplementedError
def generate_random_state(self):
raise NotImplementedError
def state_representation(self, state):
return repr(state)
class SearchNode(object):
def __init__(self, state, parent=None, action=None, cost=0, problem=None,
depth=0):
self.state = state
self.parent = parent
self.action = action
self.cost = cost
self.problem = problem or parent.problem
self.depth = depth
def expand(self, local_search=False):
new_nodes = []
for action in self.problem.actions(self.state):
new_state = self.problem.result(self.state, action)
cost = self.problem.cost(self.state,
action,
new_state)
nodefactory = self.__class__
new_nodes.append(nodefactory(state=new_state,
parent=None if local_search else self,
problem=self.problem,
action=action,
cost=self.cost + cost,
depth=self.depth + 1))
return new_nodes
def path(self):
node = self
path = []
while node:
path.append((node.action, node.state))
node = node.parent
return list(reversed(path))
def __eq__(self, other):
return isinstance(other, SearchNode) and self.state == other.state
def __repr__(self):
return 'Node <%s>' % self.problem.state_representation(self.state)
class SearchNodeCostOrdered(SearchNode):
def __lt__(self, other):
return self.cost < other.cost
class SearchNodeValueOrdered(SearchNode):
def __init__(self, *args, **kwargs):
super(SearchNodeValueOrdered, self).__init__(*args, **kwargs)
self.value = self.problem.value(self.state)
def __lt__(self, other):
return -self.value < -other.value
class SearchNodeHeuristicOrdered(SearchNode):
def __init__(self, *args, **kwargs):
super(SearchNodeHeuristicOrdered, self).__init__(*args, **kwargs)
self.heuristic = self.problem.heuristic(self.state)
def __lt__(self, other):
return self.heuristic < other.heuristic
class SearchNodeStarOrdered(SearchNodeHeuristicOrdered):
def __lt__(self, other):
return self.heuristic + self.cost < other.heuristic + other.cost
class CspProblem(object):
def __init__(self, variables, domains, constraints):
self.variables = variables
self.domains = domains
self.constraints = constraints
self.var_contraints = dict([(v, [constraint
for constraint in constraints
if v in constraint[0]])
for v in variables])
self.var_degrees = dict([(v, len(self.var_contraints[v]))
for v in variables])
| true | true |
1c32cf13fb47d925ee945cb67723304bbf17b044 | 2,298 | py | Python | tp_screening/tests/test_subject_screeing_form.py | MoffatMore/tp-screening | 20a98ba6fc57118968e507bdf5861cbdcada1848 | [
"MIT"
] | null | null | null | tp_screening/tests/test_subject_screeing_form.py | MoffatMore/tp-screening | 20a98ba6fc57118968e507bdf5861cbdcada1848 | [
"MIT"
] | null | null | null | tp_screening/tests/test_subject_screeing_form.py | MoffatMore/tp-screening | 20a98ba6fc57118968e507bdf5861cbdcada1848 | [
"MIT"
] | null | null | null | '''
Created on Jun 22, 2018
@author: moffat
'''
from django.test import TestCase, tag
from edc_constants.constants import YES, FEMALE, NO, NOT_APPLICABLE
from ..forms import SubjectScreeningForm
from edc_base.utils import get_utcnow
from copy import copy
@tag('TestSubjectForm')
class TestSubjectScreeningForm(TestCase):
    """Unit tests for SubjectScreeningForm validation rules."""

    def setUp(self):
        # Baseline of valid, eligible screening data; each test copies it
        # and tweaks single fields to trigger a specific validation error.
        self.screening_data = dict(
            subject_identifier='12345',
            report_datetime=get_utcnow(),
            gender=FEMALE,
            age_in_years=23,
            guardian_present=NOT_APPLICABLE,
            citizen=YES,
            married_to_citizen=NOT_APPLICABLE,
            marriage_certificate_present=NOT_APPLICABLE,
            literate=YES,
            literate_witness_present=NOT_APPLICABLE,
            consent_ability=YES,
            consented=YES,
            reasons_ineligible={'None'})

    def test_default_ok(self):
        # The baseline data validates with no errors and saves.
        form = SubjectScreeningForm(data=self.screening_data)
        form.is_valid()
        self.assertEqual(form.errors, {})
        self.assertTrue(form.save())

    def test_citizen_married_to_citizen_not_applicable(self):
        """test when a participant is a citizen and
        married_to_citizen is not applicable"""
        data = copy(self.screening_data)
        data.update(
            citizen=YES,
            married_to_citizen=YES,
            marriage_certificate_present=YES)
        form = SubjectScreeningForm(data=data)
        form.is_valid()
        self.assertEqual(
            form.errors, {'married_to_citizen':
                          ['This field is not applicable']})

    def test_not_literate_witness_not_applicable(self):
        # literate=NO with witness left NOT_APPLICABLE: the form flags
        # the witness field as applicable.
        data = copy(self.screening_data)
        data.update(
            literate=NO)
        form = SubjectScreeningForm(data=data)
        form.is_valid()
        self.assertEqual(
            form.errors, {'literate_witness_present':
                          ['This field is applicable']})

    def test_is_minor_guardian_none(self):
        # age_in_years=8 with guardian_present NOT_APPLICABLE: the form
        # requires the guardian field.
        data = copy(self.screening_data)
        data.update(
            age_in_years=8)
        form = SubjectScreeningForm(data=data)
        form.is_valid()
        self.assertEqual(
            form.errors, {'guardian_present':
                          ['This field is required.']})
| 31.916667 | 67 | 0.622715 |
from django.test import TestCase, tag
from edc_constants.constants import YES, FEMALE, NO, NOT_APPLICABLE
from ..forms import SubjectScreeningForm
from edc_base.utils import get_utcnow
from copy import copy
@tag('TestSubjectForm')
class TestSubjectScreeningForm(TestCase):
def setUp(self):
self.screening_data = dict(
subject_identifier='12345',
report_datetime=get_utcnow(),
gender=FEMALE,
age_in_years=23,
guardian_present=NOT_APPLICABLE,
citizen=YES,
married_to_citizen=NOT_APPLICABLE,
marriage_certificate_present=NOT_APPLICABLE,
literate=YES,
literate_witness_present=NOT_APPLICABLE,
consent_ability=YES,
consented=YES,
reasons_ineligible={'None'})
def test_default_ok(self):
form = SubjectScreeningForm(data=self.screening_data)
form.is_valid()
self.assertEqual(form.errors, {})
self.assertTrue(form.save())
def test_citizen_married_to_citizen_not_applicable(self):
data = copy(self.screening_data)
data.update(
citizen=YES,
married_to_citizen=YES,
marriage_certificate_present=YES)
form = SubjectScreeningForm(data=data)
form.is_valid()
self.assertEqual(
form.errors, {'married_to_citizen':
['This field is not applicable']})
def test_not_literate_witness_not_applicable(self):
data = copy(self.screening_data)
data.update(
literate=NO)
form = SubjectScreeningForm(data=data)
form.is_valid()
self.assertEqual(
form.errors, {'literate_witness_present':
['This field is applicable']})
def test_is_minor_guardian_none(self):
data = copy(self.screening_data)
data.update(
age_in_years=8)
form = SubjectScreeningForm(data=data)
form.is_valid()
self.assertEqual(
form.errors, {'guardian_present':
['This field is required.']})
| true | true |
1c32d0dbb302a6369ad6d5b15600eda528f552aa | 38,427 | py | Python | atlassian/bamboo.py | ezchi/atlassian-python-api | 721a4895348fc410b4781f2c1af01b6ddf80b9c5 | [
"Apache-2.0"
] | null | null | null | atlassian/bamboo.py | ezchi/atlassian-python-api | 721a4895348fc410b4781f2c1af01b6ddf80b9c5 | [
"Apache-2.0"
] | null | null | null | atlassian/bamboo.py | ezchi/atlassian-python-api | 721a4895348fc410b4781f2c1af01b6ddf80b9c5 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import logging
from requests.exceptions import HTTPError
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Bamboo(AtlassianRestAPI):
"""Private methods"""
def _get_generator(
    self,
    path,
    elements_key="results",
    element_key="result",
    data=None,
    flags=None,
    params=None,
    headers=None,
    max_results=None,
):
    """
    Generic method to return a generator with the results returned from Bamboo.
    It is intended to work for responses in the form:
    {
        'results': {'size': 5, 'start-index': 0, 'max-result': 5, 'result': []},
        ...
    }
    In this case we would have elements_key='results', element_key='result'.
    The only reason to use this generator is to abstract dealing with response
    pagination from the client.
    :param path: URI for the resource
    :param elements_key: key of the container object in the response
    :param element_key: key of the per-item list inside the container
    :param max_results: optional cap on the number of yielded items
    :return: generator over response[elements_key][element_key]
    """
    response = self.get(path, data, flags, params, headers)
    if self.advanced_mode:
        try:
            response.raise_for_status()
            response = response.json()
        except HTTPError as e:
            logging.error("Broken response: {}".format(e))
            yield e
    try:
        results = response[elements_key]
        size = 0
        # Stop early when the response holds no results. Guard against
        # max_results being None (the default): comparing int > None raises
        # TypeError on Python 3, which previously sent every such call down
        # the broken-response fallback below.
        if (max_results is not None and size > max_results) or results["size"] == 0:
            return
        for r in results[element_key]:
            size += 1
            yield r
    except TypeError:
        logging.error("Broken response: {}".format(response))
        yield response
def base_list_call(
    self, resource, expand, favourite, clover_enabled, max_results, label=None, start_index=0, **kwargs
):
    """Shared helper behind the list endpoints.

    Builds the common flag list and query parameters, then either returns a
    single page (plain GET) or a generator, the latter when the caller passes
    both ``elements_key`` and ``element_key`` in ``kwargs``.
    """
    query = {"max-results": max_results}
    if expand:
        query["expand"] = expand
    flag_list = []
    if favourite:
        flag_list.append("favourite")
    if clover_enabled:
        flag_list.append("cloverEnabled")
    if label:
        query["label"] = label
    query.update(kwargs)
    if "elements_key" in kwargs and "element_key" in kwargs:
        # Paginated variant: hand everything to the generator helper.
        return self._get_generator(
            self.resource_url(resource),
            flags=flag_list,
            params=query,
            elements_key=kwargs["elements_key"],
            element_key=kwargs["element_key"],
            max_results=max_results,
        )
    # Single-page variant: the start index only applies here.
    query["start-index"] = start_index
    return self.get(self.resource_url(resource), flags=flag_list, params=query)
""" Projects & Plans """
def projects(self, expand=None, favourite=False, clover_enabled=False, max_results=25):
return self.base_list_call(
"project",
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
max_results=max_results,
elements_key="projects",
element_key="project",
)
def project(self, project_key, expand=None, favourite=False, clover_enabled=False):
resource = "project/{}".format(project_key)
return self.base_list_call(
resource=resource,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
start_index=0,
max_results=25,
)
def project_plans(self, project_key, start_index=0, max_results=25):
"""
Returns a generator with the plans in a given project
:param project_key: Project key
:param start_index:
:param max_results:
:return: Generator with plans
"""
resource = "project/{}".format(project_key)
return self.base_list_call(
resource,
expand="plans",
favourite=False,
clover_enabled=False,
start_index=start_index,
max_results=max_results,
elements_key="plans",
element_key="plan",
)
def plans(
self,
expand=None,
favourite=False,
clover_enabled=False,
start_index=0,
max_results=25,
):
return self.base_list_call(
"plan",
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
start_index=start_index,
max_results=max_results,
elements_key="plans",
element_key="plan",
)
def plan_directory_info(self, plan_key):
"""
Returns information about the directories where artifacts, build logs, and build results will be stored.
Disabled by default.
See https://confluence.atlassian.com/display/BAMBOO/Plan+directory+information+REST+API for more information.
:param plan_key:
:return:
"""
resource = "planDirectoryInfo/{}".format(plan_key)
return self.get(self.resource_url(resource))
def get_plan(self, plan_key, expand=None):
"""
Get plan information.
:param plan_key:
:param expand: optional
:return:
"""
params = {}
if expand:
params["expand"] = expand
resource = "rest/api/latest/plan/{}".format(plan_key)
return self.get(resource, params=params)
def delete_plan(self, plan_key):
"""
Marks plan for deletion. Plan will be deleted by a batch job.
:param plan_key:
:return:
"""
resource = "rest/api/latest/plan/{}".format(plan_key)
return self.delete(resource)
def disable_plan(self, plan_key):
"""
Disable plan.
:param plan_key: str TST-BLD
:return: DELETE request
"""
resource = "plan/{plan_key}/enable".format(plan_key=plan_key)
return self.delete(self.resource_url(resource))
def enable_plan(self, plan_key):
"""
Enable plan.
:param plan_key: str TST-BLD
:return: POST request
"""
resource = "plan/{plan_key}/enable".format(plan_key=plan_key)
return self.post(self.resource_url(resource))
""" Branches """
def search_branches(self, plan_key, include_default_branch=True, max_results=25, start=0):
    """Yield branch search results for a plan, paging through the search API.

    :param plan_key: master plan key, e.g. TST-BLD
    :param include_default_branch: include the master branch in the results
    :param max_results: page size requested per call
    :param start: start index of the first page
    """
    query = {
        "max-result": max_results,
        "start-index": start,
        "masterPlanKey": plan_key,
        "includeMasterBranch": include_default_branch,
    }
    total = 1
    # Keep fetching pages until the reported total is exhausted.
    while query["start-index"] < total:
        page = self.get(self.resource_url("search/branches"), params=query)
        total = page["size"]
        for item in page["searchResults"]:
            yield item
        query["start-index"] += page["max-result"]
def plan_branches(
self,
plan_key,
expand=None,
favourite=False,
clover_enabled=False,
max_results=25,
):
"""api/1.0/plan/{projectKey}-{buildKey}/branch"""
resource = "plan/{}/branch".format(plan_key)
return self.base_list_call(
resource,
expand,
favourite,
clover_enabled,
max_results,
elements_key="branches",
element_key="branch",
)
def get_branch_info(self, plan_key, branch_name):
"""
Get information about a plan branch
:param plan_key:
:param branch_name:
:return:
"""
resource = "plan/{plan_key}/branch/{branch_name}".format(plan_key=plan_key, branch_name=branch_name)
return self.get(self.resource_url(resource))
def create_branch(
self,
plan_key,
branch_name,
vcs_branch=None,
enabled=False,
cleanup_enabled=False,
):
"""
Method for creating branch for a specified plan.
You can use vcsBranch query param to define which vcsBranch should newly created branch use.
If not specified it will not override vcsBranch from the main plan.
:param plan_key: str TST-BLD
:param branch_name: str new-shiny-branch
:param vcs_branch: str feature/new-shiny-branch, /refs/heads/new-shiny-branch
:param enabled: bool
:param cleanup_enabled: bool
:return: PUT request
"""
resource = "plan/{plan_key}/branch/{branch_name}".format(plan_key=plan_key, branch_name=branch_name)
params = {}
if vcs_branch:
params = dict(
vcsBranch=vcs_branch,
enabled="true" if enabled else "false",
cleanupEnabled="true" if cleanup_enabled else "false",
)
return self.put(self.resource_url(resource), params=params)
def get_vcs_branches(self, plan_key, max_results=25):
"""
Get all vcs names for the current plan
:param plan_key: str TST-BLD
:param max_results
:return:
"""
resource = "plan/{plan_key}/vcsBranches".format(plan_key=plan_key)
return self.base_list_call(
resource,
start_index=0,
max_results=max_results,
clover_enabled=None,
expand=None,
favourite=None,
)
""" Build results """
def results(
self,
project_key=None,
plan_key=None,
job_key=None,
build_number=None,
expand=None,
favourite=False,
clover_enabled=False,
issue_key=None,
label=None,
start_index=0,
max_results=25,
include_all_states=False,
):
"""
Get results as generic method
:param project_key:
:param plan_key:
:param job_key:
:param build_number:
:param expand:
:param favourite:
:param clover_enabled:
:param issue_key:
:param label:
:param start_index:
:param max_results:
:param include_all_states:
:return:
"""
resource = "result"
if project_key and plan_key and job_key and build_number:
resource += "/{}-{}-{}/{}".format(project_key, plan_key, job_key, build_number)
elif project_key and plan_key and build_number:
resource += "/{}-{}/{}".format(project_key, plan_key, build_number)
elif project_key and plan_key:
resource += "/{}-{}".format(project_key, plan_key)
elif project_key:
resource += "/" + project_key
params = {}
if issue_key:
params["issueKey"] = issue_key
if include_all_states:
params["includeAllStates"] = include_all_states
return self.base_list_call(
resource,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
start_index=start_index,
max_results=max_results,
elements_key="results",
element_key="result",
label=label,
**params
)
def latest_results(
self,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
"""
Get latest Results
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:param include_all_states:
:return:
"""
return self.results(
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
def project_latest_results(
self,
project_key,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
"""
Get latest Project Results
:param project_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:param include_all_states:
:return:
"""
return self.results(
project_key,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
def plan_results(
self,
project_key,
plan_key,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
"""
Get Plan results
:param project_key:
:param plan_key:
:param expand:
:param favourite:
:param clover_enabled:
:param label:
:param issue_key:
:param start_index:
:param max_results:
:param include_all_states:
:return:
"""
return self.results(
project_key,
plan_key,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
def build_result(self, build_key, expand=None, include_all_states=False, start=0, max_results=25):
"""
Returns details of a specific build result
:param expand: expands build result details on request. Possible values are: artifacts, comments, labels,
Jira Issues, stages. stages expand is available only for top level plans. It allows to drill down to job results
using stages.stage.results.result. All expand parameters should contain results.result prefix.
:param build_key: Should be in the form XX-YY[-ZZ]-99, that is, the last token should be an integer representing
the build number
:param include_all_states
:param start:
:param max_results:
"""
try:
int(build_key.split("-")[-1])
resource = "result/{}".format(build_key)
return self.base_list_call(
resource,
expand,
favourite=False,
clover_enabled=False,
start_index=start,
max_results=max_results,
include_all_states=include_all_states,
)
except ValueError:
raise ValueError('The key "{}" does not correspond to a build result'.format(build_key))
def build_latest_result(self, plan_key, expand=None, include_all_states=False):
"""
Returns details of a latest build result
:param expand: expands build result details on request. Possible values are: artifacts, comments, labels,
Jira Issues, stages. stages expand is available only for top level plans. It allows to drill down to job results
using stages.stage.results.result. All expand parameters should contain results.result prefix.
:param plan_key: Should be in the form XX-YY[-ZZ]
:param include_all_states:
"""
try:
resource = "result/{}/latest.json".format(plan_key)
return self.base_list_call(
resource,
expand,
favourite=False,
clover_enabled=False,
start_index=0,
max_results=25,
include_all_states=include_all_states,
)
except ValueError:
raise ValueError('The key "{}" does not correspond to the latest build result'.format(plan_key))
def delete_build_result(self, build_key):
    """
    Deleting result for specific build
    :param build_key: Take full build key, example: PROJECT-PLAN-8
    """
    custom_resource = "/build/admin/deletePlanResults.action"
    # Split from the right so the build number is always the last token and
    # plan keys that themselves contain dashes are handled correctly
    # (the old split("-") assumed exactly three tokens).
    plan_key, build_number = build_key.rsplit("-", 1)
    params = {"buildKey": plan_key, "buildNumber": build_number}
    return self.post(custom_resource, params=params, headers=self.form_token_headers)
def execute_build(self, plan_key, stage=None, execute_all_stages=True, custom_revision=None, **bamboo_variables):
"""
Fire build execution for specified plan.
!IMPORTANT! NOTE: for some reason, this method always execute all stages
:param plan_key: str TST-BLD
:param stage: str stage-name
:param execute_all_stages: bool
:param custom_revision: str revisionName
:param bamboo_variables: dict {variable=value}
:return: POST request
"""
resource = "queue/{plan_key}".format(plan_key=plan_key)
params = {}
if stage:
execute_all_stages = False
params["stage"] = stage
if custom_revision:
params["customRevision"] = custom_revision
params["executeAllStages"] = "true" if execute_all_stages else "false"
if bamboo_variables:
for key, value in bamboo_variables.items():
params["bamboo.variable.{}".format(key)] = value
return self.post(self.resource_url(resource), params=params)
def stop_build(self, plan_key):
"""
Stop the build which is in progress at the moment.
:param plan_key: str TST-BLD
:return: GET request
"""
resource = "/build/admin/stopPlan.action?planKey={}".format(plan_key)
return self.post(path=resource, headers=self.no_check_headers)
""" Comments & Labels """
def comments(self, project_key, plan_key, build_number, start_index=0, max_results=25):
resource = "result/{}-{}-{}/comment".format(project_key, plan_key, build_number)
params = {"start-index": start_index, "max-results": max_results}
return self.get(self.resource_url(resource), params=params)
def create_comment(self, project_key, plan_key, build_number, comment, author=None):
resource = "result/{}-{}-{}/comment".format(project_key, plan_key, build_number)
comment_data = {
"author": author if author else self.username,
"content": comment,
}
return self.post(self.resource_url(resource), data=comment_data)
def labels(self, project_key, plan_key, build_number, start_index=0, max_results=25):
resource = "result/{}-{}-{}/label".format(project_key, plan_key, build_number)
params = {"start-index": start_index, "max-results": max_results}
return self.get(self.resource_url(resource), params=params)
def create_label(self, project_key, plan_key, build_number, label):
resource = "result/{}-{}-{}/label".format(project_key, plan_key, build_number)
return self.post(self.resource_url(resource), data={"name": label})
def delete_label(self, project_key, plan_key, build_number, label):
resource = "result/{}-{}-{}/label/{}".format(project_key, plan_key, build_number, label)
return self.delete(self.resource_url(resource))
def get_projects(self):
"""Method used to list all projects defined in Bamboo.
Projects without any plan are not listed by default, unless showEmpty query param is set to true."""
resource = "project?showEmpty"
for project in self.get(self.resource_url(resource)):
yield project
def get_project(self, project_key):
"""Method used to retrieve information for project specified as project key.
Possible expand parameters: plans, list of plans for project. plans.plan, list of plans with plan details
(only plans visible - READ permission for user)"""
resource = "project/{}?showEmpty".format(project_key)
return self.get(self.resource_url(resource))
def delete_project(self, project_key):
"""Marks project for deletion. Project will be deleted by a batch job."""
resource = "project/{}".format(project_key)
return self.delete(self.resource_url(resource))
""" Deployments """
def deployment_projects(self):
resource = "deploy/project/all"
for project in self.get(self.resource_url(resource)):
yield project
def deployment_project(self, project_id):
resource = "deploy/project/{}".format(project_id)
return self.get(self.resource_url(resource))
def deployment_environment_results(self, env_id, expand=None, max_results=25):
resource = "deploy/environment/{environmentId}/results".format(environmentId=env_id)
params = {"max-result": max_results, "start-index": 0}
size = 1
if expand:
params["expand"] = expand
while params["start-index"] < size:
results = self.get(self.resource_url(resource), params=params)
size = results["size"]
for r in results["results"]:
yield r
params["start-index"] += results["max-result"]
def deployment_dashboard(self, project_id=None):
"""
Returns the current status of each deployment environment
If no project id is provided, returns all projects.
"""
resource = "deploy/dashboard/{}".format(project_id) if project_id else "deploy/dashboard"
return self.get(self.resource_url(resource))
""" Users & Groups """
def get_users_in_global_permissions(self, start=0, limit=25):
"""
Provide users in global permissions configuration
:param start:
:param limit:
:return:
"""
params = {"limit": limit, "start": start}
url = "rest/api/latest/permissions/global/users"
return self.get(url, params=params)
def get_groups(self, start=0, limit=25):
"""
Retrieve a paginated list of groups.
The authenticated user must have restricted administrative permission or higher to use this resource.
:param start:
:param limit:
:return:
"""
params = {"limit": limit, "start": start}
url = "rest/api/latest/admin/groups"
return self.get(url, params=params)
def create_group(self, group_name):
"""
Create a new group.
The authenticated user must have restricted administrative permission or higher to use this resource.
:param group_name:
:return:
"""
url = "rest/api/latest/admin/groups"
data = {"name": group_name}
return self.post(url, data=data)
def delete_group(self, group_name):
"""
Deletes the specified group, removing it from the system.
The authenticated user must have restricted administrative permission or higher to use this resource.
:param group_name:
:return:
"""
url = "rest/api/latest/admin/groups/{}".format(group_name)
return self.delete(url)
def add_users_into_group(self, group_name, users):
"""
Add multiple users to a group.
The list of usernames should be passed as request body.
The authenticated user must have restricted administrative permission or higher to use this resource.
:param group_name:
:param users: list
:return:
"""
url = "rest/api/latest/admin/groups/{}/add-users".format(group_name)
return self.post(url, data=users)
def remove_users_from_group(self, group_name, users):
"""
Remove multiple users from a group.
The list of usernames should be passed as request body.
The authenticated user must have restricted administrative permission or higher to use this resource.
:param group_name:
:param users: list
:return:
"""
url = "rest/api/latest/admin/groups/{}/remove-users".format(group_name)
return self.delete(url, data=users)
def get_users_from_group(self, group_name, filter_users=None, start=0, limit=25):
    """
    Retrieves a list of users that are members of a specified group.
    The authenticated user must have restricted administrative permission or higher to use this resource.
    :param filter_users: optional substring filter on user names
    :param group_name:
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_users:
        # Add the filter on top of the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["filter"] = filter_users
    url = "rest/api/latest/admin/groups/{}/more-members".format(group_name)
    return self.get(url, params=params)
def get_users_not_in_group(self, group_name, filter_users="", start=0, limit=25):
    """
    Retrieves a list of users that are not members of a specified group.
    The authenticated user must have restricted administrative permission or higher to use this resource.
    :param filter_users: optional substring filter on user names
    :param group_name:
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_users:
        # Add the filter on top of the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["filter"] = filter_users
    url = "rest/api/latest/admin/groups/{}/more-non-members".format(group_name)
    return self.get(url, params=params)
def get_build_queue(self, expand="queuedBuilds"):
"""
Lists all the builds waiting in the build queue, adds or removes a build from the build queue.
May be used also to resume build on manual stage or rerun failed jobs.
:return:
"""
params = {"expand": expand}
return self.get("rest/api/latest/queue", params=params)
def get_deployment_users(self, deployment_id, filter_name=None, start=0, limit=25):
    """
    Retrieve a list of users with their explicit permissions to given resource.
    The list can be filtered by some attributes.
    This resource is paged and returns a single page of results.
    :param deployment_id:
    :param filter_name: optional name filter
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_name:
        # Add the name filter alongside the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["name"] = filter_name
    resource = "permissions/deployment/{}/users".format(deployment_id)
    return self.get(self.resource_url(resource), params=params)
def revoke_user_from_deployment(self, deployment_id, user, permissions=None):
    """
    Revokes deployment project permissions from a given user.
    :param deployment_id:
    :param user:
    :param permissions: list of permission names; defaults to
        ["READ", "WRITE", "BUILD"]
    :return:
    """
    # Build the default per call to avoid a shared mutable default argument.
    if permissions is None:
        permissions = ["READ", "WRITE", "BUILD"]
    resource = "permissions/deployment/{}/users/{}".format(deployment_id, user)
    return self.delete(self.resource_url(resource), data=permissions)
def grant_user_to_deployment(self, deployment_id, user, permissions):
"""
Grants deployment project permissions to a given user.
:param deployment_id:
:param user:
:param permissions:
:return:
"""
resource = "permissions/deployment/{}/users/{}".format(deployment_id, user)
return self.put(self.resource_url(resource), data=permissions)
def get_deployment_groups(self, deployment_id, filter_name=None, start=0, limit=25):
    """
    Retrieve a list of groups with their deployment project permissions.
    The list can be filtered by some attributes.
    This resource is paged returns a single page of results.
    :param deployment_id:
    :param filter_name: optional name filter
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_name:
        # Add the name filter alongside the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["name"] = filter_name
    resource = "permissions/deployment/{}/groups".format(deployment_id)
    return self.get(self.resource_url(resource), params=params)
def revoke_group_from_deployment(self, deployment_id, group, permissions=None):
    """
    Revokes deployment project permissions from a given group.
    :param deployment_id:
    :param group:
    :param permissions: list of permission names; defaults to
        ["READ", "WRITE", "BUILD"]
    :return:
    """
    # Build the default per call to avoid a shared mutable default argument.
    if permissions is None:
        permissions = ["READ", "WRITE", "BUILD"]
    resource = "permissions/deployment/{}/groups/{}".format(deployment_id, group)
    return self.delete(self.resource_url(resource), data=permissions)
def grant_group_to_deployment(self, deployment_id, group, permissions):
"""
Grants deployment project permissions to a given group.
:param deployment_id:
:param group:
:param permissions:
:return:
"""
resource = "permissions/deployment/{}/groups/{}".format(deployment_id, group)
return self.put(self.resource_url(resource), data=permissions)
def get_environment_users(self, environment_id, filter_name=None, start=0, limit=25):
    """
    Retrieve a list of users with their explicit permissions to given resource.
    The list can be filtered by some attributes.
    This resource is paged and returns a single page of results.
    :param environment_id:
    :param filter_name: optional name filter
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_name:
        # Add the name filter alongside the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["name"] = filter_name
    resource = "permissions/environment/{}/users".format(environment_id)
    return self.get(self.resource_url(resource), params=params)
def revoke_user_from_environment(self, environment_id, user, permissions=None):
    """
    Revokes deployment environment permissions from a given user.
    :param environment_id:
    :param user:
    :param permissions: list of permission names; defaults to
        ["READ", "WRITE", "BUILD"]
    :return:
    """
    # Build the default per call to avoid a shared mutable default argument.
    if permissions is None:
        permissions = ["READ", "WRITE", "BUILD"]
    resource = "permissions/environment/{}/users/{}".format(environment_id, user)
    return self.delete(self.resource_url(resource), data=permissions)
def grant_user_to_environment(self, environment_id, user, permissions):
"""
Grants deployment environment permissions to a given user.
:param environment_id:
:param user:
:param permissions:
:return:
"""
resource = "permissions/environment/{}/users/{}".format(environment_id, user)
return self.put(self.resource_url(resource), data=permissions)
def get_environment_groups(self, environment_id, filter_name=None, start=0, limit=25):
    """
    Retrieve a list of groups with their deployment environment permissions.
    The list can be filtered by some attributes.
    This resource is paged returns a single page of results.
    :param environment_id:
    :param filter_name: optional name filter
    :param start: pagination start index
    :param limit: page size
    :return:
    """
    params = {"limit": limit, "start": start}
    if filter_name:
        # Add the name filter alongside the paging parameters instead of
        # replacing the dict (which silently dropped start/limit).
        params["name"] = filter_name
    resource = "permissions/environment/{}/groups".format(environment_id)
    return self.get(self.resource_url(resource), params=params)
def revoke_group_from_environment(self, environment_id, group, permissions=None):
    """
    Revokes deployment environment permissions from a given group.
    :param environment_id:
    :param group:
    :param permissions: list of permission names; defaults to
        ["READ", "WRITE", "BUILD"]
    :return:
    """
    # Build the default per call to avoid a shared mutable default argument.
    if permissions is None:
        permissions = ["READ", "WRITE", "BUILD"]
    resource = "permissions/environment/{}/groups/{}".format(environment_id, group)
    return self.delete(self.resource_url(resource), data=permissions)
def grant_group_to_environment(self, environment_id, group, permissions):
"""
Grants deployment environment permissions to a given group.
:param environment_id:
:param group:
:param permissions:
:return:
"""
resource = "permissions/environment/{}/groups/{}".format(environment_id, group)
return self.put(self.resource_url(resource), data=permissions)
"""Other actions"""
def server_info(self):
return self.get(self.resource_url("info"))
def agent_status(self, online=False):
"""
Provides a list of all agents.
:param online: filter only online agents (default False = all)
:return:
"""
return self.get(self.resource_url("agent"), params={"online": online})
def agent_is_online(self, agent_id):
"""
Get agent online status.
:param agent_id: Bamboo agent ID (integer number)
:return: True/False
"""
response = self.get(self.resource_url("agent/{}/status".format(agent_id)))
return response["online"]
def agent_enable(self, agent_id):
"""
Enable agent
:param agent_id: Bamboo agent ID (integer number)
:return: None
"""
self.put(self.resource_url("agent/{}/enable".format(agent_id)))
def agent_disable(self, agent_id):
"""
Disable agent
:param agent_id: Bamboo agent ID (integer number)
:return: None
"""
self.put(self.resource_url("agent/{}/disable".format(agent_id)))
def agent_remote(self, online=False):
"""
Provides a list of all agent authentication statuses.
:param online: list only online agents (default False = all)
:return: list of agent-describing dictionaries
"""
return self.get(self.resource_url("agent/remote"), params={"online": online})
def agent_details(self, agent_id, expand=None):
"""
Provides details of an agent with given id.
:param agent_id: Bamboo agent ID (integer number)
:param expand: Expand fields (None, capabilities, executableEnvironments, executableJobs)
:return:
"""
params = None
if expand:
params = {"expand": expand}
return self.get(self.resource_url("agent/{}".format(agent_id)), params=params)
def agent_capabilities(self, agent_id, include_shared=True):
"""
List agent's capabilities.
:param agent_id: Bamboo agent ID (integer number)
:param include_shared: Include shared capabilities
:return: agents
"""
return self.get(
self.resource_url("agent/{}/capability".format(agent_id)), params={"includeShared": include_shared}
)
def activity(self):
return self.get("build/admin/ajax/getDashboardSummary.action")
def get_custom_expiry(self, limit=25):
"""
Get list of all plans where user has admin permission and which override global expiry settings.
If global expiry is not enabled it returns empty response.
:param limit:
"""
url = "rest/api/latest/admin/expiry/custom/plan?limit={}".format(limit)
return self.get(url)
def reports(self, max_results=25):
params = {"max-results": max_results}
return self._get_generator(
self.resource_url("chart/reports"),
elements_key="reports",
element_key="report",
params=params,
)
def chart(
self,
report_key,
build_keys,
group_by_period,
date_filter=None,
date_from=None,
date_to=None,
width=None,
height=None,
start_index=9,
max_results=25,
):
params = {
"reportKey": report_key,
"buildKeys": build_keys,
"groupByPeriod": group_by_period,
"start-index": start_index,
"max-results": max_results,
}
if date_filter:
params["dateFilter"] = date_filter
if date_filter == "RANGE":
params["dateFrom"] = date_from
params["dateTo"] = date_to
if width:
params["width"] = width
if height:
params["height"] = height
return self.get(self.resource_url("chart"), params=params)
    def reindex(self):
        """
        Returns status of the current indexing operation.
        reindexInProgress - reindex is currently performed in background reindexPending - reindex is required
        (i.e. it failed before or some upgrade task asked for it)
        """
        # GET only reports status; POST on the same resource starts a reindex.
        return self.get(self.resource_url("reindex"))
    def stop_reindex(self):
        """
        Kicks off a reindex. Requires system admin permissions to perform this reindex.
        """
        # NOTE(review): the name says "stop", but per the docstring POST /reindex
        # *starts* a reindex — confirm intent before renaming (callers may
        # depend on the current name).
        return self.post(self.resource_url("reindex"))
def health_check(self):
"""
Get health status
https://confluence.atlassian.com/jirakb/how-to-retrieve-health-check-results-using-rest-api-867195158.html
:return:
"""
# check as Troubleshooting & Support Tools Plugin
response = self.get("rest/troubleshooting/1.0/check/")
if not response:
# check as support tools
response = self.get("rest/supportHealthCheck/1.0/check/")
return response
def upload_plugin(self, plugin_path):
"""
Provide plugin path for upload into Jira e.g. useful for auto deploy
:param plugin_path:
:return:
"""
files = {"plugin": open(plugin_path, "rb")}
upm_token = self.request(
method="GET",
path="rest/plugins/1.0/",
headers=self.no_check_headers,
trailing=True,
).headers["upm-token"]
url = "rest/plugins/1.0/?token={upm_token}".format(upm_token=upm_token)
return self.post(url, files=files, headers=self.no_check_headers)
| 35.449262 | 120 | 0.601608 |
import logging
from requests.exceptions import HTTPError
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Bamboo(AtlassianRestAPI):
def _get_generator(
self,
path,
elements_key="results",
element_key="result",
data=None,
flags=None,
params=None,
headers=None,
max_results=None,
):
response = self.get(path, data, flags, params, headers)
if self.advanced_mode:
try:
response.raise_for_status()
response = response.json()
except HTTPError as e:
logging.error("Broken response: {}".format(e))
yield e
try:
results = response[elements_key]
size = 0
if size > max_results or results["size"] == 0:
return
for r in results[element_key]:
size += 1
yield r
except TypeError:
logging.error("Broken response: {}".format(response))
yield response
    def base_list_call(
        self, resource, expand, favourite, clover_enabled, max_results, label=None, start_index=0, **kwargs
    ):
        """
        Shared helper for list endpoints: builds flags/params and either
        returns the raw GET response or a generator over paged elements.

        :param resource: relative REST resource path
        :param expand: optional "expand" query value
        :param favourite: when truthy, adds the "favourite" flag
        :param clover_enabled: when truthy, adds the "cloverEnabled" flag
        :param max_results: page size ("max-results" query parameter)
        :param label: optional label filter
        :param start_index: pagination offset (only used on the non-generator path)
        :param kwargs: extra query parameters; when both "elements_key" and
            "element_key" are present the call returns a generator instead
        """
        flags = []
        params = {"max-results": max_results}
        if expand:
            params["expand"] = expand
        if favourite:
            flags.append("favourite")
        if clover_enabled:
            flags.append("cloverEnabled")
        if label:
            params["label"] = label
        params.update(kwargs)
        # NOTE(review): on the generator path elements_key/element_key remain
        # inside params and are sent as query parameters — presumably ignored
        # by the server, but confirm.
        if "elements_key" in kwargs and "element_key" in kwargs:
            return self._get_generator(
                self.resource_url(resource),
                flags=flags,
                params=params,
                elements_key=kwargs["elements_key"],
                element_key=kwargs["element_key"],
                max_results=max_results,
            )
        params["start-index"] = start_index
        return self.get(self.resource_url(resource), flags=flags, params=params)
def projects(self, expand=None, favourite=False, clover_enabled=False, max_results=25):
return self.base_list_call(
"project",
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
max_results=max_results,
elements_key="projects",
element_key="project",
)
def project(self, project_key, expand=None, favourite=False, clover_enabled=False):
resource = "project/{}".format(project_key)
return self.base_list_call(
resource=resource,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
start_index=0,
max_results=25,
)
def project_plans(self, project_key, start_index=0, max_results=25):
resource = "project/{}".format(project_key)
return self.base_list_call(
resource,
expand="plans",
favourite=False,
clover_enabled=False,
start_index=start_index,
max_results=max_results,
elements_key="plans",
element_key="plan",
)
def plans(
self,
expand=None,
favourite=False,
clover_enabled=False,
start_index=0,
max_results=25,
):
return self.base_list_call(
"plan",
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
start_index=start_index,
max_results=max_results,
elements_key="plans",
element_key="plan",
)
def plan_directory_info(self, plan_key):
resource = "planDirectoryInfo/{}".format(plan_key)
return self.get(self.resource_url(resource))
def get_plan(self, plan_key, expand=None):
params = {}
if expand:
params["expand"] = expand
resource = "rest/api/latest/plan/{}".format(plan_key)
return self.get(resource, params=params)
def delete_plan(self, plan_key):
resource = "rest/api/latest/plan/{}".format(plan_key)
return self.delete(resource)
def disable_plan(self, plan_key):
resource = "plan/{plan_key}/enable".format(plan_key=plan_key)
return self.delete(self.resource_url(resource))
def enable_plan(self, plan_key):
resource = "plan/{plan_key}/enable".format(plan_key=plan_key)
return self.post(self.resource_url(resource))
    def search_branches(self, plan_key, include_default_branch=True, max_results=25, start=0):
        """
        Yield branch search results for a plan, following pagination.

        :param plan_key: key of the master plan whose branches are searched
        :param include_default_branch: include the master branch in the results
        :param max_results: requested page size ("max-result" query parameter)
        :param start: initial pagination offset
        """
        params = {
            "max-result": max_results,
            "start-index": start,
            "masterPlanKey": plan_key,
            "includeMasterBranch": include_default_branch,
        }
        size = 1
        # Keep requesting pages until the server-reported total is reached.
        while params["start-index"] < size:
            results = self.get(self.resource_url("search/branches"), params=params)
            size = results["size"]
            for r in results["searchResults"]:
                yield r
            # Advance by the page size the server actually used.
            params["start-index"] += results["max-result"]
def plan_branches(
self,
plan_key,
expand=None,
favourite=False,
clover_enabled=False,
max_results=25,
):
resource = "plan/{}/branch".format(plan_key)
return self.base_list_call(
resource,
expand,
favourite,
clover_enabled,
max_results,
elements_key="branches",
element_key="branch",
)
def get_branch_info(self, plan_key, branch_name):
resource = "plan/{plan_key}/branch/{branch_name}".format(plan_key=plan_key, branch_name=branch_name)
return self.get(self.resource_url(resource))
def create_branch(
self,
plan_key,
branch_name,
vcs_branch=None,
enabled=False,
cleanup_enabled=False,
):
resource = "plan/{plan_key}/branch/{branch_name}".format(plan_key=plan_key, branch_name=branch_name)
params = {}
if vcs_branch:
params = dict(
vcsBranch=vcs_branch,
enabled="true" if enabled else "false",
cleanupEnabled="true" if cleanup_enabled else "false",
)
return self.put(self.resource_url(resource), params=params)
def get_vcs_branches(self, plan_key, max_results=25):
resource = "plan/{plan_key}/vcsBranches".format(plan_key=plan_key)
return self.base_list_call(
resource,
start_index=0,
max_results=max_results,
clover_enabled=None,
expand=None,
favourite=None,
)
    def results(
        self,
        project_key=None,
        plan_key=None,
        job_key=None,
        build_number=None,
        expand=None,
        favourite=False,
        clover_enabled=False,
        issue_key=None,
        label=None,
        start_index=0,
        max_results=25,
        include_all_states=False,
    ):
        """
        Get build results at the requested scope: everything, one project,
        one plan, one build, or one job of a build — depending on which key
        arguments are supplied.

        :param project_key: optional project key
        :param plan_key: optional plan key (requires project_key)
        :param job_key: optional job key (requires plan_key and build_number)
        :param build_number: optional build number
        :param expand: optional "expand" query value
        :param favourite: restrict to favourites
        :param clover_enabled: restrict to Clover-enabled plans
        :param issue_key: optional issue-key filter
        :param label: optional label filter
        :param start_index: pagination offset
        :param max_results: page size
        :param include_all_states: include results in every state
        :return: generator over result dictionaries
        """
        resource = "result"
        # Narrow the resource path with as many identifiers as were given;
        # the most specific combination must be checked first.
        if project_key and plan_key and job_key and build_number:
            resource += "/{}-{}-{}/{}".format(project_key, plan_key, job_key, build_number)
        elif project_key and plan_key and build_number:
            resource += "/{}-{}/{}".format(project_key, plan_key, build_number)
        elif project_key and plan_key:
            resource += "/{}-{}".format(project_key, plan_key)
        elif project_key:
            resource += "/" + project_key
        params = {}
        if issue_key:
            params["issueKey"] = issue_key
        if include_all_states:
            params["includeAllStates"] = include_all_states
        return self.base_list_call(
            resource,
            expand=expand,
            favourite=favourite,
            clover_enabled=clover_enabled,
            start_index=start_index,
            max_results=max_results,
            elements_key="results",
            element_key="result",
            label=label,
            **params
        )
def latest_results(
self,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
return self.results(
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
def project_latest_results(
self,
project_key,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
return self.results(
project_key,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
def plan_results(
self,
project_key,
plan_key,
expand=None,
favourite=False,
clover_enabled=False,
label=None,
issue_key=None,
start_index=0,
max_results=25,
include_all_states=False,
):
return self.results(
project_key,
plan_key,
expand=expand,
favourite=favourite,
clover_enabled=clover_enabled,
label=label,
issue_key=issue_key,
start_index=start_index,
max_results=max_results,
include_all_states=include_all_states,
)
    def build_result(self, build_key, expand=None, include_all_states=False, start=0, max_results=25):
        """
        Get the results of a single build.

        :param build_key: full build key ending in the build number, e.g. "PROJ-PLAN-42"
        :param expand: optional "expand" query value
        :param include_all_states: include results in every state
        :param start: pagination offset
        :param max_results: page size
        :raises ValueError: when build_key does not end in an integer build number
        """
        try:
            # Validate that the key ends with a numeric build number.
            int(build_key.split("-")[-1])
            resource = "result/{}".format(build_key)
            return self.base_list_call(
                resource,
                expand,
                favourite=False,
                clover_enabled=False,
                start_index=start,
                max_results=max_results,
                include_all_states=include_all_states,
            )
        except ValueError:
            raise ValueError('The key "{}" does not correspond to a build result'.format(build_key))
def build_latest_result(self, plan_key, expand=None, include_all_states=False):
try:
resource = "result/{}/latest.json".format(plan_key)
return self.base_list_call(
resource,
expand,
favourite=False,
clover_enabled=False,
start_index=0,
max_results=25,
include_all_states=include_all_states,
)
except ValueError:
raise ValueError('The key "{}" does not correspond to the latest build result'.format(plan_key))
def delete_build_result(self, build_key):
custom_resource = "/build/admin/deletePlanResults.action"
build_key = build_key.split("-")
plan_key = "{}-{}".format(build_key[0], build_key[1])
build_number = build_key[2]
params = {"buildKey": plan_key, "buildNumber": build_number}
return self.post(custom_resource, params=params, headers=self.form_token_headers)
    def execute_build(self, plan_key, stage=None, execute_all_stages=True, custom_revision=None, **bamboo_variables):
        """
        Queue a build of a plan.

        :param plan_key: key of the plan to build
        :param stage: optional stage name; when given, execute_all_stages is forced off
        :param execute_all_stages: run every stage of the plan
        :param custom_revision: optional VCS revision to build
        :param bamboo_variables: extra Bamboo variables, sent as "bamboo.variable.<name>"
        """
        resource = "queue/{plan_key}".format(plan_key=plan_key)
        params = {}
        if stage:
            # Requesting a specific stage implies not executing all stages.
            execute_all_stages = False
            params["stage"] = stage
        if custom_revision:
            params["customRevision"] = custom_revision
        params["executeAllStages"] = "true" if execute_all_stages else "false"
        if bamboo_variables:
            for key, value in bamboo_variables.items():
                params["bamboo.variable.{}".format(key)] = value
        return self.post(self.resource_url(resource), params=params)
def stop_build(self, plan_key):
resource = "/build/admin/stopPlan.action?planKey={}".format(plan_key)
return self.post(path=resource, headers=self.no_check_headers)
def comments(self, project_key, plan_key, build_number, start_index=0, max_results=25):
resource = "result/{}-{}-{}/comment".format(project_key, plan_key, build_number)
params = {"start-index": start_index, "max-results": max_results}
return self.get(self.resource_url(resource), params=params)
def create_comment(self, project_key, plan_key, build_number, comment, author=None):
resource = "result/{}-{}-{}/comment".format(project_key, plan_key, build_number)
comment_data = {
"author": author if author else self.username,
"content": comment,
}
return self.post(self.resource_url(resource), data=comment_data)
def labels(self, project_key, plan_key, build_number, start_index=0, max_results=25):
resource = "result/{}-{}-{}/label".format(project_key, plan_key, build_number)
params = {"start-index": start_index, "max-results": max_results}
return self.get(self.resource_url(resource), params=params)
def create_label(self, project_key, plan_key, build_number, label):
resource = "result/{}-{}-{}/label".format(project_key, plan_key, build_number)
return self.post(self.resource_url(resource), data={"name": label})
def delete_label(self, project_key, plan_key, build_number, label):
resource = "result/{}-{}-{}/label/{}".format(project_key, plan_key, build_number, label)
return self.delete(self.resource_url(resource))
def get_projects(self):
resource = "project?showEmpty"
for project in self.get(self.resource_url(resource)):
yield project
def get_project(self, project_key):
resource = "project/{}?showEmpty".format(project_key)
return self.get(self.resource_url(resource))
def delete_project(self, project_key):
resource = "project/{}".format(project_key)
return self.delete(self.resource_url(resource))
def deployment_projects(self):
resource = "deploy/project/all"
for project in self.get(self.resource_url(resource)):
yield project
def deployment_project(self, project_id):
resource = "deploy/project/{}".format(project_id)
return self.get(self.resource_url(resource))
    def deployment_environment_results(self, env_id, expand=None, max_results=25):
        """
        Yield deployment results for an environment, following pagination.

        :param env_id: deployment environment id
        :param expand: optional "expand" query value
        :param max_results: requested page size ("max-result" query parameter)
        """
        resource = "deploy/environment/{environmentId}/results".format(environmentId=env_id)
        params = {"max-result": max_results, "start-index": 0}
        size = 1
        if expand:
            params["expand"] = expand
        # Keep requesting pages until the server-reported total is reached.
        while params["start-index"] < size:
            results = self.get(self.resource_url(resource), params=params)
            size = results["size"]
            for r in results["results"]:
                yield r
            params["start-index"] += results["max-result"]
def deployment_dashboard(self, project_id=None):
resource = "deploy/dashboard/{}".format(project_id) if project_id else "deploy/dashboard"
return self.get(self.resource_url(resource))
def get_users_in_global_permissions(self, start=0, limit=25):
params = {"limit": limit, "start": start}
url = "rest/api/latest/permissions/global/users"
return self.get(url, params=params)
def get_groups(self, start=0, limit=25):
params = {"limit": limit, "start": start}
url = "rest/api/latest/admin/groups"
return self.get(url, params=params)
def create_group(self, group_name):
url = "rest/api/latest/admin/groups"
data = {"name": group_name}
return self.post(url, data=data)
def delete_group(self, group_name):
url = "rest/api/latest/admin/groups/{}".format(group_name)
return self.delete(url)
def add_users_into_group(self, group_name, users):
url = "rest/api/latest/admin/groups/{}/add-users".format(group_name)
return self.post(url, data=users)
def remove_users_from_group(self, group_name, users):
url = "rest/api/latest/admin/groups/{}/remove-users".format(group_name)
return self.delete(url, data=users)
def get_users_from_group(self, group_name, filter_users=None, start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_users:
params = {"filter": filter_users}
url = "rest/api/latest/admin/groups/{}/more-members".format(group_name)
return self.get(url, params=params)
def get_users_not_in_group(self, group_name, filter_users="", start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_users:
params = {"filter": filter_users}
url = "rest/api/latest/admin/groups/{}/more-non-members".format(group_name)
return self.get(url, params=params)
def get_build_queue(self, expand="queuedBuilds"):
params = {"expand": expand}
return self.get("rest/api/latest/queue", params=params)
def get_deployment_users(self, deployment_id, filter_name=None, start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_name:
params = {"name": filter_name}
resource = "permissions/deployment/{}/users".format(deployment_id)
return self.get(self.resource_url(resource), params=params)
def revoke_user_from_deployment(self, deployment_id, user, permissions=["READ", "WRITE", "BUILD"]):
resource = "permissions/deployment/{}/users/{}".format(deployment_id, user)
return self.delete(self.resource_url(resource), data=permissions)
def grant_user_to_deployment(self, deployment_id, user, permissions):
resource = "permissions/deployment/{}/users/{}".format(deployment_id, user)
return self.put(self.resource_url(resource), data=permissions)
def get_deployment_groups(self, deployment_id, filter_name=None, start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_name:
params = {"name": filter_name}
resource = "permissions/deployment/{}/groups".format(deployment_id)
return self.get(self.resource_url(resource), params=params)
def revoke_group_from_deployment(self, deployment_id, group, permissions=["READ", "WRITE", "BUILD"]):
resource = "permissions/deployment/{}/groups/{}".format(deployment_id, group)
return self.delete(self.resource_url(resource), data=permissions)
def grant_group_to_deployment(self, deployment_id, group, permissions):
resource = "permissions/deployment/{}/groups/{}".format(deployment_id, group)
return self.put(self.resource_url(resource), data=permissions)
def get_environment_users(self, environment_id, filter_name=None, start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_name:
params = {"name": filter_name}
resource = "permissions/environment/{}/users".format(environment_id)
return self.get(self.resource_url(resource), params=params)
def revoke_user_from_environment(self, environment_id, user, permissions=["READ", "WRITE", "BUILD"]):
resource = "permissions/environment/{}/users/{}".format(environment_id, user)
return self.delete(self.resource_url(resource), data=permissions)
def grant_user_to_environment(self, environment_id, user, permissions):
resource = "permissions/environment/{}/users/{}".format(environment_id, user)
return self.put(self.resource_url(resource), data=permissions)
def get_environment_groups(self, environment_id, filter_name=None, start=0, limit=25):
params = {"limit": limit, "start": start}
if filter_name:
params = {"name": filter_name}
resource = "permissions/environment/{}/groups".format(environment_id)
return self.get(self.resource_url(resource), params=params)
def revoke_group_from_environment(self, environment_id, group, permissions=["READ", "WRITE", "BUILD"]):
resource = "permissions/environment/{}/groups/{}".format(environment_id, group)
return self.delete(self.resource_url(resource), data=permissions)
def grant_group_to_environment(self, environment_id, group, permissions):
resource = "permissions/environment/{}/groups/{}".format(environment_id, group)
return self.put(self.resource_url(resource), data=permissions)
def server_info(self):
return self.get(self.resource_url("info"))
def agent_status(self, online=False):
return self.get(self.resource_url("agent"), params={"online": online})
def agent_is_online(self, agent_id):
response = self.get(self.resource_url("agent/{}/status".format(agent_id)))
return response["online"]
def agent_enable(self, agent_id):
self.put(self.resource_url("agent/{}/enable".format(agent_id)))
def agent_disable(self, agent_id):
self.put(self.resource_url("agent/{}/disable".format(agent_id)))
def agent_remote(self, online=False):
return self.get(self.resource_url("agent/remote"), params={"online": online})
def agent_details(self, agent_id, expand=None):
params = None
if expand:
params = {"expand": expand}
return self.get(self.resource_url("agent/{}".format(agent_id)), params=params)
def agent_capabilities(self, agent_id, include_shared=True):
return self.get(
self.resource_url("agent/{}/capability".format(agent_id)), params={"includeShared": include_shared}
)
def activity(self):
return self.get("build/admin/ajax/getDashboardSummary.action")
def get_custom_expiry(self, limit=25):
url = "rest/api/latest/admin/expiry/custom/plan?limit={}".format(limit)
return self.get(url)
def reports(self, max_results=25):
params = {"max-results": max_results}
return self._get_generator(
self.resource_url("chart/reports"),
elements_key="reports",
element_key="report",
params=params,
)
def chart(
self,
report_key,
build_keys,
group_by_period,
date_filter=None,
date_from=None,
date_to=None,
width=None,
height=None,
start_index=9,
max_results=25,
):
params = {
"reportKey": report_key,
"buildKeys": build_keys,
"groupByPeriod": group_by_period,
"start-index": start_index,
"max-results": max_results,
}
if date_filter:
params["dateFilter"] = date_filter
if date_filter == "RANGE":
params["dateFrom"] = date_from
params["dateTo"] = date_to
if width:
params["width"] = width
if height:
params["height"] = height
return self.get(self.resource_url("chart"), params=params)
def reindex(self):
return self.get(self.resource_url("reindex"))
def stop_reindex(self):
return self.post(self.resource_url("reindex"))
def health_check(self):
response = self.get("rest/troubleshooting/1.0/check/")
if not response:
response = self.get("rest/supportHealthCheck/1.0/check/")
return response
def upload_plugin(self, plugin_path):
files = {"plugin": open(plugin_path, "rb")}
upm_token = self.request(
method="GET",
path="rest/plugins/1.0/",
headers=self.no_check_headers,
trailing=True,
).headers["upm-token"]
url = "rest/plugins/1.0/?token={upm_token}".format(upm_token=upm_token)
return self.post(url, files=files, headers=self.no_check_headers)
| true | true |
1c32d4d28c537a81e998d99b7abbb9c41bd7fce6 | 13,439 | py | Python | pdkb/test/aamas.py | QuMuLab/PDKB-Planning | 61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d | [
"MIT"
] | 11 | 2020-05-11T15:32:49.000Z | 2021-09-14T17:49:48.000Z | pdkb/test/aamas.py | QuMuLab/PDKB-Planning | 61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d | [
"MIT"
] | 1 | 2020-04-22T13:16:49.000Z | 2020-04-22T13:16:49.000Z | pdkb/test/aamas.py | QuMuLab/PDKB-Planning | 61a96c006b606aa051b2c7c9b5bfc9b6473d2a4d | [
"MIT"
] | 4 | 2020-04-16T08:51:17.000Z | 2021-09-14T17:51:50.000Z |
import sys, random, time
from pdkb.kd45 import *
from pdkb.indexed_kd45 import *
from pdkb.pinf import *
from pdkb.rml import *
from pdkb.test.utils import random_pdkb, random_rml, write_file, append_file
# Experiment preset selector: 'small', 'normal', or 'big'.
TYPE = 'normal'
# Number of random PDKBs generated per (agents, depth) configuration.
NUM_PDKBS = 10
# Number of queries issued against each generated PDKB.
QUERIES_PER_PDKB = 10
# Each preset fixes: AGENTS / DEPTH as inclusive (min, max) ranges, the fluent
# vocabulary, the range of fluents drawn per KB, and the range of RML counts.
if 'small' == TYPE:
    AGENTS = (2,3)
    DEPTH = (2,3)
    FLUENTS = list(map(Literal, 'pqr'))
    FLUENT_RANGE = (2,3)
    RMLS = (3,8)
elif 'normal' == TYPE:
    AGENTS = (3,6)
    DEPTH = (4,7)
    FLUENTS = list(map(Literal, 'pqrst'))
    FLUENT_RANGE = (3,5)
    RMLS = (13,39)
elif 'big' == TYPE:
    AGENTS = (3,10)
    DEPTH = (3,10)
    FLUENTS = list(map(Literal, 'pqrstvwxyz'))
    FLUENT_RANGE = (5,10)
    RMLS = (50,150)
else:
    assert False, "Bad experiment type: %s" % TYPE
def now():
    """Return the current wall-clock time in seconds since the epoch."""
    return time.time()
def doit():
    """
    Run the size/time experiment over every (agents, depth) configuration
    and append one CSV row per configuration to aamas.csv.

    Set skip_ag / skip_dep to resume a partially completed run: earlier
    configurations are skipped and the CSV header is not rewritten.
    """
    skip_ag = 0
    skip_dep = 0
    if skip_ag == 0 and skip_dep == 0:
        write_file('aamas.csv', 'agents,depth,fluents,inf-size,closed-size,reduced-size,inf-query,closed-query,reduced-query,inf-update,closed-update,reduced-update')
    for ag in range(AGENTS[0], AGENTS[1]+1):
        for dep in range(DEPTH[0], DEPTH[1]+1):
            # Skip configurations already completed in a previous run.
            if ag < skip_ag:
                continue
            elif ag == skip_ag and dep < skip_dep:
                continue
            print()
            print("--------------")
            print(" %d x %d" % (ag, dep))
            print("--------------")
            (times, sizes) = get_size_and_time(ag, dep, FLUENTS)
            print()
            print("-------------------------")
            append_file('aamas.csv', "\n%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (ag, dep, len(FLUENTS), sizes[0], sizes[1], sizes[2], times[0], times[1], times[2], times[3], times[4], times[5]))
def get_size_and_time(num_agents, depth, fluents):
    """
    Benchmark the INF, logically-closed, and indexed PDKB representations.

    Generates NUM_PDKBS random knowledge bases and measures (1) query and
    update times averaged over entailed and non-member RML queries and
    (2) the average size of each representation.

    :param num_agents: number of agents in each generated PDKB
    :param depth: maximum modal depth of each generated PDKB
    :param fluents: fluent vocabulary to draw RMLs from
    :return: (times, sizes) where times holds six averages
        [inf-query, closed-query, indexed-query,
         inf-update, closed-update, indexed-update]
        and sizes holds three averages [inf, closed, indexed]
    """
    agents = list(range(1, num_agents + 1))

    def generate_kbs():
        # Build one random consistent PDKB in all three representations.
        numRMLs = num_agents * depth * 2
        closed_kb = PDKB(depth, agents, fluents)
        indexed_kb = IndexedPDKB(depth, agents, fluents)
        count = 0
        while count < numRMLs:
            next_rml = random_rml(depth, agents, fluents)
            # Only add RMLs whose negation is not already entailed, so the
            # KB stays consistent.
            if not closed_kb.query(neg(next_rml)):
                closed_kb.add_rml(next_rml)
                closed_kb.logically_close()
                indexed_kb.expand(set([next_rml]))
                count += 1
        inf_kb = INF.PDKB2INF(closed_kb)
        return (inf_kb, closed_kb, indexed_kb)

    def run_queries(index, rml, infs_kb, closed_kb, indexed_kb):
        # Time one query per representation (all three must agree) and one
        # update per representation.
        start = now()
        ans1 = infs_kb.query(rml)
        inf_query = now() - start
        start = now()
        ans2 = closed_kb.query(rml)
        closed_query = now() - start
        start = now()
        ans3 = indexed_kb.query(rml)
        unclosed_query = now() - start
        assert ans1 == ans2
        assert ans2 == ans3
        # Copy the KBs so updates do not disturb the originals.
        copy_kb = closed_kb.copy()
        copy_indexed_kb = indexed_kb.copy()
        inf_update = 0.0  # INF update is not yet implemented
        start = now()
        copy_kb.update(set([rml]))
        closed_update = now() - start
        start = now()
        copy_indexed_kb.update(set([rml]))
        unclosed_update = now() - start
        return (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update)

    # Warm-up pass with arbitrary random queries; results are not recorded.
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        for j in range(QUERIES_PER_PDKB):
            rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query,
             inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)

    print("Performing random successful queries...")
    times = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        for j in range(QUERIES_PER_PDKB):
            # Query an RML known to be in the closed KB.
            rml = random.choice(list(closed_kb.rmls))
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query,
             inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)
            assert ans1 == ans2
            assert ans2 == ans3
            times[0] += inf_query
            times[1] += closed_query
            times[2] += unclosed_query
            times[3] += inf_update
            times[4] += closed_update
            times[5] += unclosed_update

    sizes = [0.0, 0.0, 0.0]
    print("Performing random unsuccessful queries...")
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        sizes[0] += infs_kb.size()
        sizes[1] += closed_kb.size()
        sizes[2] += unclosed_kb.size()
        for j in range(QUERIES_PER_PDKB):
            # Sample RMLs until one outside the closed KB is found.
            going = True
            while going:
                rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)
                if rml not in closed_kb.rmls:
                    going = False
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query,
             inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)
            assert ans1 == ans2
            assert ans2 == ans3
            times[0] += inf_query
            times[1] += closed_query
            times[2] += unclosed_query
            times[3] += inf_update
            times[4] += closed_update
            times[5] += unclosed_update

    # Average over the two recorded passes, and sizes over the KBs.
    for k in range(6):
        times[k] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    for k in range(3):
        sizes[k] /= float(NUM_PDKBS)

    print("\nDone!\n")
    return (times, sizes)
def checkit(filename):
    """
    Load experiment results from *filename* and render 3D wireframe plots of
    query time, representation size, and update time against the number of
    agents and the maximum depth.

    NOTE(review): relies on a load_CSV helper that is not imported in the
    visible code — confirm it is provided elsewhere before running.

    :param filename: CSV produced by doit()
    """
    data = load_CSV(filename)[1:]
    # Clamp exact zeros so the log-scale plots below stay finite.
    for row in data:
        for i in range(len(row)):
            if row[i] == '0.000000':
                row[i] = '0.000001'

    def plot_data(data, inds, labs, cols, zlabel, fname):
        # Index the three requested CSV columns by (agents, depth).
        data_map = {}
        for ag in range(AGENTS[0], AGENTS[1]+1):
            data_map[ag] = {}
            for dep in range(DEPTH[0], DEPTH[1]+1):
                data_map[ag][dep] = {}
        for row in data:
            data_map[int(row[0])][int(row[1])][inds[0]] = float(row[inds[0]])
            data_map[int(row[0])][int(row[1])][inds[1]] = float(row[inds[1]])
            data_map[int(row[0])][int(row[1])][inds[2]] = float(row[inds[2]])
        from mpl_toolkits.mplot3d import axes3d
        import matplotlib.pyplot as plt
        import matplotlib
        import numpy as np
        X, Y = np.meshgrid(np.arange(AGENTS[0], AGENTS[1]+1), np.arange(DEPTH[0], DEPTH[1]+1))
        zs0 = np.array([data_map[x][y][inds[0]] for x,y in zip(np.ravel(X), np.ravel(Y))])
        zs1 = np.array([data_map[x][y][inds[1]] for x,y in zip(np.ravel(X), np.ravel(Y))])
        zs2 = np.array([data_map[x][y][inds[2]] for x,y in zip(np.ravel(X), np.ravel(Y))])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Time plots are drawn on a natural-log scale; size plots in 1000s.
        if 'Query Time ($log_e(sec)$)' == zlabel or 'Update Time ($log_e(sec)$)' == zlabel:
            print("za = " + str(zs0))
            Z0 = np.log(zs0).reshape(X.shape)
            print("Z0 = " + str(Z0))
            Z1 = np.log(zs1).reshape(X.shape)
            Z2 = np.log(zs2).reshape(X.shape)
        else:
            Z0 = (zs0 / 1000).reshape(X.shape)
            Z1 = (zs1 / 1000).reshape(X.shape)
            Z2 = (zs2 / 1000).reshape(X.shape)
        ax.plot_wireframe(X, Y, Z0, color=cols[0])
        ax.plot_wireframe(X, Y, Z1, color=cols[1])
        ax.plot_wireframe(X, Y, Z2, color=cols[2])
        ax.set_xlabel('# of Agents')
        ax.set_ylabel('Maximum Depth')
        ax.set_zlabel(zlabel)
        # Wireframes get no automatic legend entries, so use proxy artists.
        scatter1_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[0], marker = 's')
        scatter2_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[1], marker = 's')
        scatter3_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[2], marker = 's')
        ax.legend([scatter1_proxy, scatter2_proxy, scatter3_proxy], [labs[0], labs[1], labs[2]], numpoints = 1)
        ax.get_xaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        ax.get_yaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        plt.show()

    # Three distinct qualitative colours for the three series.
    col1 = '#1b9e77'
    col2 = '#d95f02'
    col3 = '#7570b3'
    print("Plotting query time...")
    plot_data(data, [6, 8, 7], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Query Time ($log_e(sec)$)', 'time.eps')
    print("Plotting size...")
    plot_data(data, [4, 3, 5], ['Closure', 'INF', '$V_{RML}$'], [col2, col1, col3], 'Size (x1000)', 'size.eps')
    print("Plotting update time...")
    plot_data(data, [9, 11, 10], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Update Time ($log_e(sec)$)', 'update_time.eps')
| 36.22372 | 193 | 0.564328 |
import sys, random, time
from pdkb.kd45 import *
from pdkb.indexed_kd45 import *
from pdkb.pinf import *
from pdkb.rml import *
from pdkb.test.utils import random_pdkb, random_rml, write_file, append_file
# Experiment scale; must be one of the preset keys below.
TYPE = 'normal'

# How many random PDKBs to generate per grid cell, and how many queries
# to issue against each generated PDKB.
NUM_PDKBS = 10
QUERIES_PER_PDKB = 10

# Preset problem sizes, keyed by TYPE:
#   agents: (min, max) number of agents
#   depth: (min, max) modal depth
#   letters: propositional fluent names
#   fluent_range: (min, max) number of fluents
#   rmls: (min, max) number of RMLs
_PRESETS = {
    'small': {'agents': (2, 3), 'depth': (2, 3), 'letters': 'pqr',
              'fluent_range': (2, 3), 'rmls': (3, 8)},
    'normal': {'agents': (3, 6), 'depth': (4, 7), 'letters': 'pqrst',
               'fluent_range': (3, 5), 'rmls': (13, 39)},
    'big': {'agents': (3, 10), 'depth': (3, 10), 'letters': 'pqrstvwxyz',
            'fluent_range': (5, 10), 'rmls': (50, 150)},
}

# Fail loudly on an unknown TYPE. The previous `assert False` is stripped
# under `python -O`, which would leave the names below undefined and fail
# later with a confusing NameError; ValueError raises unconditionally.
if TYPE not in _PRESETS:
    raise ValueError("Bad experiment type: %s" % TYPE)

_preset = _PRESETS[TYPE]
AGENTS = _preset['agents']
DEPTH = _preset['depth']
FLUENTS = list(map(Literal, _preset['letters']))
FLUENT_RANGE = _preset['fluent_range']
RMLS = _preset['rmls']
def now():
    """Return a timestamp in seconds for measuring elapsed time.

    Uses time.perf_counter() instead of time.time(): all callers only ever
    subtract two of these values, and perf_counter is a monotonic,
    high-resolution clock that is unaffected by system clock adjustments.
    """
    return time.perf_counter()
def doit():
    """Sweep the (agents, depth) grid and append averaged results to aamas.csv.

    For every cell the benchmark returns six averaged timings and three
    averaged sizes, which are written as one CSV row. The skip_* locals allow
    manually resuming a partially completed sweep.
    """
    skip_ag = 0
    skip_dep = 0
    # Only (re)write the CSV header when starting from scratch.
    if skip_ag == 0 and skip_dep == 0:
        write_file('aamas.csv', 'agents,depth,fluents,inf-size,closed-size,reduced-size,inf-query,closed-query,reduced-query,inf-update,closed-update,reduced-update')
    for ag in range(AGENTS[0], AGENTS[1] + 1):
        for dep in range(DEPTH[0], DEPTH[1] + 1):
            # Resume support: skip every cell lexicographically before
            # (skip_ag, skip_dep). Tuple comparison is exactly
            # "ag < skip_ag, or ag == skip_ag and dep < skip_dep".
            if (ag, dep) < (skip_ag, skip_dep):
                continue
            print()
            print("--------------")
            print(" %d x %d" % (ag, dep))
            print("--------------")
            times, sizes = get_size_and_time(ag, dep, FLUENTS)
            print()
            print("-------------------------")
            row = "\n%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f" % (
                ag, dep, len(FLUENTS),
                sizes[0], sizes[1], sizes[2],
                times[0], times[1], times[2], times[3], times[4], times[5])
            append_file('aamas.csv', row)
def get_size_and_time(num_agents, depth, fluents):
    """Benchmark the three KB representations (INF, closed, indexed).

    Generates NUM_PDKBS random PDKBs for the given agent count, modal depth
    and fluent set, then times queries and updates against each
    representation.

    Returns a pair (times, sizes):
    - times: six averages in seconds, ordered
      [inf-query, closed-query, indexed-query,
       inf-update, closed-update, indexed-update].
    - sizes: three average KB sizes, ordered [inf, closed, indexed].
    """
    agents = list(range(1, num_agents + 1))
    def generate_kbs():
        # Build one random KB in all three representations. Only RMLs whose
        # negation is not already entailed are added, so the closed KB stays
        # consistent; the indexed KB mirrors every accepted RML.
        numRMLs = num_agents * depth * 2
        closed_kb = PDKB(depth, agents, fluents)
        indexed_kb = IndexedPDKB(depth, agents, fluents)
        count = 0
        while count < numRMLs:
            next_rml = random_rml(depth, agents, fluents)
            if not closed_kb.query(neg(next_rml)):
                closed_kb.add_rml(next_rml)
                closed_kb.logically_close()
                indexed_kb.expand(set([next_rml]))
                count += 1
        # The INF representation is derived from the finished closed KB.
        inf_kb = INF.PDKB2INF(closed_kb)
        return (inf_kb, closed_kb, indexed_kb)
    def run_queries(index, rml, infs_kb, closed_kb, indexed_kb):
        # Time one query of `rml` against each representation, then one
        # update on copies (so the originals are left untouched).
        start = now()
        ans1 = infs_kb.query(rml)
        inf_query = now() - start
        start = now()
        ans2 = closed_kb.query(rml)
        closed_query = now() - start
        start = now()
        ans3 = indexed_kb.query(rml)
        unclosed_query = now() - start
        # All three representations must agree on every query.
        assert ans1 == ans2
        assert ans2 == ans3
        copy_kb = closed_kb.copy()
        copy_indexed_kb = indexed_kb.copy()
        # NOTE(review): the INF update is never measured and is reported as
        # 0.0 — presumably updates are not supported/benchmarked for INF.
        # TODO confirm intentional.
        inf_update = 0.0
        start = now()
        copy_kb.update(set([rml]))
        closed_update = now() - start
        start = now()
        copy_indexed_kb.update(set([rml]))
        unclosed_update = now() - start
        return (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update)
    # First pass: results are discarded — this appears to act as a warm-up /
    # sanity pass before the timed passes below.
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        for j in range(QUERIES_PER_PDKB):
            rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)
    print("Performing random successful queries...")
    times = [0.0,0.0,0.0,0.0,0.0,0.0]
    # Timed pass 1: queries drawn from the KB itself, so every query succeeds.
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        for j in range(QUERIES_PER_PDKB):
            rml = random.choice(list(closed_kb.rmls))
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)
            assert ans1 == ans2
            assert ans2 == ans3
            times[0] += inf_query
            times[1] += closed_query
            times[2] += unclosed_query
            times[3] += inf_update
            times[4] += closed_update
            times[5] += unclosed_update
    sizes = [0.0, 0.0, 0.0]
    print("Performing random unsuccessful queries...")
    # Timed pass 2: random RMLs re-drawn until one is NOT in the KB, so every
    # query fails. Sizes are also sampled here, once per generated KB.
    for i in range(NUM_PDKBS):
        (infs_kb, closed_kb, unclosed_kb) = generate_kbs()
        sizes[0] += infs_kb.size()
        sizes[1] += closed_kb.size()
        sizes[2] += unclosed_kb.size()
        for j in range(QUERIES_PER_PDKB):
            going = True
            while going:
                rml = random_rml(closed_kb.depth, closed_kb.agents, closed_kb.props)
                if rml not in closed_kb.rmls:
                    going = False
            (ans1, ans2, ans3, inf_query, closed_query, unclosed_query, inf_update, closed_update, unclosed_update) = run_queries(i, rml, infs_kb, closed_kb, unclosed_kb)
            assert ans1 == ans2
            assert ans2 == ans3
            times[0] += inf_query
            times[1] += closed_query
            times[2] += unclosed_query
            times[3] += inf_update
            times[4] += closed_update
            times[5] += unclosed_update
    # Average over both timed passes (successful + unsuccessful), hence * 2.
    times[0] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    times[1] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    times[2] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    times[3] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    times[4] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    times[5] /= float(NUM_PDKBS * QUERIES_PER_PDKB * 2)
    # Sizes are sampled once per KB, so average over the KB count only.
    sizes[0] /= float(NUM_PDKBS)
    sizes[1] /= float(NUM_PDKBS)
    sizes[2] /= float(NUM_PDKBS)
    print("\nDone!\n")
    return (times, sizes)
def checkit(filename):
    """Load a results CSV (header dropped) and clamp zero timings.

    Timings recorded as exactly '0.000000' are bumped to '0.000001' so that
    the downstream log-scale plots (np.log in plot_data) never see a zero.

    Fix: the patched rows used to be a local that was silently discarded
    (the function returned None); they are now returned so callers can bind
    them, e.g. ``data = checkit('aamas.csv')``.

    Returns the list of patched rows.
    """
    data = load_CSV(filename)[1:]
    for row in data:
        # Mutate in place so the containing row objects stay shared.
        for i, val in enumerate(row):
            if val == '0.000000':
                row[i] = '0.000001'
    return data
def plot_data(data, inds, labs, cols, zlabel, fname):
    """Render a 3D wireframe comparing three CSV columns over the grid.

    data: rows from aamas.csv with the header stripped; row[0] is the agent
    count and row[1] the depth. inds: the three column indices to plot;
    labs / cols: legend labels and line colors in the same order as inds;
    zlabel: z-axis caption (the two time plots get a natural-log scale);
    fname: target file name.
    """
    # data_map[agents][depth][column-index] -> float value from the CSV.
    data_map = {}
    for ag in range(AGENTS[0], AGENTS[1]+1):
        data_map[ag] = {}
        for dep in range(DEPTH[0], DEPTH[1]+1):
            data_map[ag][dep] = {}
    for row in data:
        data_map[int(row[0])][int(row[1])][inds[0]] = float(row[inds[0]])
        data_map[int(row[0])][int(row[1])][inds[1]] = float(row[inds[1]])
        data_map[int(row[0])][int(row[1])][inds[2]] = float(row[inds[2]])
    # axes3d is imported for its side effect of registering the '3d'
    # projection (the name itself is unused) — presumably needed on the
    # matplotlib version this was written against.
    from mpl_toolkits.mplot3d import axes3d
    import matplotlib.pyplot as plt
    import matplotlib
    import numpy as np
    X, Y = np.meshgrid(np.arange(AGENTS[0], AGENTS[1]+1), np.arange(DEPTH[0], DEPTH[1]+1))
    # Flatten the grid and look up the matching value for each (x, y) cell.
    zs0 = np.array([data_map[x][y][inds[0]] for x,y in zip(np.ravel(X), np.ravel(Y))])
    zs1 = np.array([data_map[x][y][inds[1]] for x,y in zip(np.ravel(X), np.ravel(Y))])
    zs2 = np.array([data_map[x][y][inds[2]] for x,y in zip(np.ravel(X), np.ravel(Y))])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    if 'Query Time ($log_e(sec)$)' == zlabel or 'Update Time ($log_e(sec)$)' == zlabel:
        print("za = " + str(zs0))
        # np.log of a zero timing would be -inf; presumably this is why
        # checkit() clamps '0.000000' entries upstream.
        Z0 = np.log(zs0).reshape(X.shape)
        print("Z0 = " + str(Z0))
        Z1 = np.log(zs1).reshape(X.shape)
        Z2 = np.log(zs2).reshape(X.shape)
    else:
        # Size plot: scale to thousands to match the 'Size (x1000)' label.
        Z0 = (zs0 / 1000).reshape(X.shape)
        Z1 = (zs1 / 1000).reshape(X.shape)
        Z2 = (zs2 / 1000).reshape(X.shape)
    ax.plot_wireframe(X, Y, Z0, color=cols[0])
    ax.plot_wireframe(X, Y, Z1, color=cols[1])
    ax.plot_wireframe(X, Y, Z2, color=cols[2])
    ax.set_xlabel('# of Agents')
    ax.set_ylabel('Maximum Depth')
    ax.set_zlabel(zlabel)
    # Wireframes have no legend handles; use square line proxies instead.
    scatter1_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[0], marker = 's')
    scatter2_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[1], marker = 's')
    scatter3_proxy = matplotlib.lines.Line2D([0],[0], linestyle="none", c=cols[2], marker = 's')
    ax.legend([scatter1_proxy, scatter2_proxy, scatter3_proxy], [labs[0], labs[1], labs[2]], numpoints = 1)
    ax.get_xaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
    ax.get_yaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
    # NOTE(review): fname is unused — the figure is only shown, never saved
    # (the .eps names passed by callers suggest savefig was intended).
    # TODO confirm.
    plt.show()
# ColorBrewer "Dark2" palette used to tell the three KB representations apart.
col1 = '#1b9e77'
col2 = '#d95f02'
col3 = '#7570b3'
# NOTE(review): `data` is not assigned anywhere at module level in this file —
# presumably the rows loaded from 'aamas.csv' (see checkit) were meant to be
# bound here first; as written these calls raise NameError. TODO confirm.
# The index lists select CSV columns; labels/colors are ordered to match.
print("Plotting query time...")
plot_data(data, [6, 8, 7], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Query Time ($log_e(sec)$)', 'time.eps')
print("Plotting size...")
plot_data(data, [4, 3, 5], ['Closure', 'INF', '$V_{RML}$'], [col2, col1, col3], 'Size (x1000)', 'size.eps')
print("Plotting update time...")
plot_data(data, [9, 11, 10], ['INF', '$V_{RML}$', 'Closure'], [col1, col3, col2], 'Update Time ($log_e(sec)$)', 'update_time.eps')
| true | true |
1c32d51cb8737141a11db3eef28fed93d17034df | 20,597 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc3918ipmcminmaxlat_26e8ac413e1c57bed7d65373af19d55d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc3918ipmcminmaxlat_26e8ac413e1c57bed7d65373af19d55d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/rfc3918ipmcminmaxlat_26e8ac413e1c57bed7d65373af19d55d.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Rfc3918ipmcMinMaxLat(Base):
    """Sets the latency mode to fetch related statistics for each mode.

    The Rfc3918ipmcMinMaxLat class encapsulates a list of rfc3918ipmcMinMaxLat
    resources that are managed by the user.
    A list of resources can be retrieved from the server using the
    Rfc3918ipmcMinMaxLat.find() method.
    The list can be managed by using the Rfc3918ipmcMinMaxLat.add() and
    Rfc3918ipmcMinMaxLat.remove() methods.
    """
    __slots__ = ()
    _SDM_NAME = 'rfc3918ipmcMinMaxLat'
    # Python attribute name -> server-side (SDM) attribute name.
    _SDM_ATT_MAP = {
        'ForceApplyQTConfig': 'forceApplyQTConfig',
        'InputParameters': 'inputParameters',
        'Mode': 'mode',
        'Name': 'name',
    }
    # Legal values for enum-typed attributes.
    _SDM_ENUM_MAP = {
        'mode': ['existingMode', 'newMode'],
    }

    def __init__(self, parent, list_op=False):
        super(Rfc3918ipmcMinMaxLat, self).__init__(parent, list_op)

    def _child_resource(self, key, cls, select=True):
        """Return the cached child resource under `key`, or build a new `cls`.

        When `select` is True the freshly built child is immediately selected
        from the server; otherwise the bare accessor object is returned.
        Factors out the pattern previously duplicated by every child property.
        """
        cached = self._properties.get(key, None)
        if cached is not None:
            return cached
        child = cls(self)
        return child._select() if select else child

    def _exec_payload(self, args, kwargs):
        """Build the argument payload for a server-side execute operation.

        Arg1 is always this resource's href; positional args become
        Arg2..ArgN; keyword args pass through unchanged. Factors out the
        payload-building previously duplicated by every execute method.
        """
        payload = {"Arg1": self.href}
        for i, arg in enumerate(args, start=2):
            payload['Arg%s' % i] = arg
        payload.update(kwargs)
        return payload

    @property
    def LearnFrames(self):
        """An instance of the LearnFrames class.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_23d5d0e6c090e51421209fe48681213c import LearnFrames
        return self._child_resource('LearnFrames', LearnFrames)

    @property
    def PassCriteria(self):
        """An instance of the PassCriteria class.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_0b0999544178b324ac2991fc06eed5cb import PassCriteria
        return self._child_resource('PassCriteria', PassCriteria)

    @property
    def Results(self):
        """An instance of the Results class.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_0bddc8e813350efaa3216873c39c9c3d import Results
        return self._child_resource('Results', Results)

    @property
    def TestConfig(self):
        """An instance of the TestConfig class.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_b058c64fa3591eb4abc8ee56d4ec176d import TestConfig
        return self._child_resource('TestConfig', TestConfig)

    @property
    def TrafficSelection(self):
        """An instance of the TrafficSelection class.

        Unlike the other children, this is a user-managed list resource and
        is returned without an implicit server select.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_29873b8fb6cf6b316293a9205cd5ddbd import TrafficSelection
        return self._child_resource('TrafficSelection', TrafficSelection, select=False)

    @property
    def ForceApplyQTConfig(self):
        # type: () -> bool
        """bool: Apply QT config"""
        return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])

    @ForceApplyQTConfig.setter
    def ForceApplyQTConfig(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)

    @property
    def InputParameters(self):
        # type: () -> str
        """str: Input Parameters"""
        return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])

    @InputParameters.setter
    def InputParameters(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)

    @property
    def Mode(self):
        # type: () -> str
        """str(existingMode | newMode): Test mode"""
        return self._get_attribute(self._SDM_ATT_MAP['Mode'])

    @Mode.setter
    def Mode(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Mode'], value)

    @property
    def Name(self):
        # type: () -> str
        """str: Test name"""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # type: (bool, str, str, str) -> Rfc3918ipmcMinMaxLat
        """Updates rfc3918ipmcMinMaxLat resource on the server.

        Args
        ----
        - ForceApplyQTConfig (bool): Apply QT config
        - InputParameters (str): Input Parameters
        - Mode (str(existingMode | newMode)): Test mode
        - Name (str): Test name

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # type: (bool, str, str, str) -> Rfc3918ipmcMinMaxLat
        """Adds a new rfc3918ipmcMinMaxLat resource on the server and adds it to the container.

        Args
        ----
        - ForceApplyQTConfig (bool): Apply QT config
        - InputParameters (str): Input Parameters
        - Mode (str(existingMode | newMode)): Test mode
        - Name (str): Test name

        Returns
        -------
        - self: This instance with all currently retrieved rfc3918ipmcMinMaxLat resources using find and the newly added rfc3918ipmcMinMaxLat resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained rfc3918ipmcMinMaxLat resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        # type: (bool, str, str, str) -> Rfc3918ipmcMinMaxLat
        """Finds and retrieves rfc3918ipmcMinMaxLat resources from the server.

        All named parameters are evaluated on the server using regex. The named
        parameters can be used to selectively retrieve resources; to match
        exactly, start the value with ^ and end it with $. With no parameters,
        all rfc3918ipmcMinMaxLat resources are retrieved.

        Args
        ----
        - ForceApplyQTConfig (bool): Apply QT config
        - InputParameters (str): Input Parameters
        - Mode (str(existingMode | newMode)): Test mode
        - Name (str): Test name

        Returns
        -------
        - self: This instance with matching rfc3918ipmcMinMaxLat resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of rfc3918ipmcMinMaxLat data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the rfc3918ipmcMinMaxLat resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)

    def Apply(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Executes the apply operation on the server: applies the specified Quick Test.

        apply(async_operation=bool)
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('apply', payload=self._exec_payload(args, kwargs), response_object=None)

    def ApplyAsync(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Executes the applyAsync operation on the server.

        applyAsync(async_operation=bool)
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('applyAsync', payload=self._exec_payload(args, kwargs), response_object=None)

    def ApplyAsyncResult(self, *args, **kwargs):
        # type: (*Any, **Any) -> Union[bool, None]
        """Executes the applyAsyncResult operation on the server.

        applyAsyncResult(async_operation=bool)bool
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.
        - Returns bool:

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('applyAsyncResult', payload=self._exec_payload(args, kwargs), response_object=None)

    def ApplyITWizardConfiguration(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Executes the applyITWizardConfiguration operation on the server: applies the specified Quick Test.

        applyITWizardConfiguration(async_operation=bool)
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('applyITWizardConfiguration', payload=self._exec_payload(args, kwargs), response_object=None)

    def GenerateReport(self, *args, **kwargs):
        # type: (*Any, **Any) -> Union[str, None]
        """Executes the generateReport operation on the server: generates a PDF report for the last successful test run.

        generateReport(async_operation=bool)string
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.
        - Returns str: This method is asynchronous and has no return value.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('generateReport', payload=self._exec_payload(args, kwargs), response_object=None)

    def Run(self, *args, **kwargs):
        # type: (*Any, **Any) -> Union[List[str], None]
        """Executes the run operation on the server: starts the Quick Test and waits for it to finish.

        The IxNetwork model allows multiple signatures with the same name:

        run(async_operation=bool)list
        run(InputParameters=string, async_operation=bool)list
        - InputParameters (str): The input arguments of the test.
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.
        - Returns list(str): This method is synchronous and returns the result of the test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('run', payload=self._exec_payload(args, kwargs), response_object=None)

    def Start(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Executes the start operation on the server: starts the specified Quick Test.

        The IxNetwork model allows multiple signatures with the same name:

        start(async_operation=bool)
        start(InputParameters=string, async_operation=bool)
        - InputParameters (str): The input arguments of the test.
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('start', payload=self._exec_payload(args, kwargs), response_object=None)

    def Stop(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Executes the stop operation on the server: stops the currently running Quick Test.

        stop(async_operation=bool)
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('stop', payload=self._exec_payload(args, kwargs), response_object=None)

    def WaitForTest(self, *args, **kwargs):
        # type: (*Any, **Any) -> Union[List[str], None]
        """Executes the waitForTest operation on the server: waits for the Quick Test to complete.

        waitForTest(async_operation=bool)list
        - async_operation (bool=False): True to run asynchronously; subsequent
          REST calls through the Connection class block until it completes.
        - Returns list(str): This method is synchronous and returns the result of the test.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._execute('waitForTest', payload=self._exec_payload(args, kwargs), response_object=None)
| 43.362105 | 193 | 0.646793 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Rfc3918ipmcMinMaxLat(Base):
    """Sets the latency mode to fetch related statistics for each mode.

    Encapsulates a user-managed list of rfc3918ipmcMinMaxLat resources:
    retrieve with find(), create with add(), delete with remove().
    """
    __slots__ = ()
    _SDM_NAME = 'rfc3918ipmcMinMaxLat'
    # Python attribute name -> server-side (SDM) attribute name.
    _SDM_ATT_MAP = {
        'ForceApplyQTConfig': 'forceApplyQTConfig',
        'InputParameters': 'inputParameters',
        'Mode': 'mode',
        'Name': 'name',
    }
    # Legal values for enum-typed attributes.
    _SDM_ENUM_MAP = {
        'mode': ['existingMode', 'newMode'],
    }
    def __init__(self, parent, list_op=False):
        super(Rfc3918ipmcMinMaxLat, self).__init__(parent, list_op)
    @property
    def LearnFrames(self):
        """The cached LearnFrames child, or a freshly selected instance."""
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.learnframes_23d5d0e6c090e51421209fe48681213c import LearnFrames
        if self._properties.get('LearnFrames', None) is not None:
            return self._properties.get('LearnFrames')
        else:
            return LearnFrames(self)._select()
    @property
    def PassCriteria(self):
        """The cached PassCriteria child, or a freshly selected instance."""
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.passcriteria_0b0999544178b324ac2991fc06eed5cb import PassCriteria
        if self._properties.get('PassCriteria', None) is not None:
            return self._properties.get('PassCriteria')
        else:
            return PassCriteria(self)._select()
    @property
    def Results(self):
        """The cached Results child, or a freshly selected instance."""
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.results_0bddc8e813350efaa3216873c39c9c3d import Results
        if self._properties.get('Results', None) is not None:
            return self._properties.get('Results')
        else:
            return Results(self)._select()
    @property
    def TestConfig(self):
        """The cached TestConfig child, or a freshly selected instance."""
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.testconfig_b058c64fa3591eb4abc8ee56d4ec176d import TestConfig
        if self._properties.get('TestConfig', None) is not None:
            return self._properties.get('TestConfig')
        else:
            return TestConfig(self)._select()
    @property
    def TrafficSelection(self):
        """The cached TrafficSelection child, or a new accessor.

        Note: a user-managed list resource — returned without an implicit
        server select, unlike the other child properties.
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.trafficselection_29873b8fb6cf6b316293a9205cd5ddbd import TrafficSelection
        if self._properties.get('TrafficSelection', None) is not None:
            return self._properties.get('TrafficSelection')
        else:
            return TrafficSelection(self)
    @property
    def ForceApplyQTConfig(self):
        """bool: Apply QT config."""
        return self._get_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'])
    @ForceApplyQTConfig.setter
    def ForceApplyQTConfig(self, value):
        self._set_attribute(self._SDM_ATT_MAP['ForceApplyQTConfig'], value)
    @property
    def InputParameters(self):
        """str: Input Parameters."""
        return self._get_attribute(self._SDM_ATT_MAP['InputParameters'])
    @InputParameters.setter
    def InputParameters(self, value):
        self._set_attribute(self._SDM_ATT_MAP['InputParameters'], value)
    @property
    def Mode(self):
        """str: Test mode; one of 'existingMode' | 'newMode' (see _SDM_ENUM_MAP)."""
        return self._get_attribute(self._SDM_ATT_MAP['Mode'])
    @Mode.setter
    def Mode(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Mode'], value)
    @property
    def Name(self):
        """str: Test name."""
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    def update(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Update this rfc3918ipmcMinMaxLat resource on the server with the given attributes."""
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Add a new rfc3918ipmcMinMaxLat resource on the server and to this container."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Delete all contained rfc3918ipmcMinMaxLat resources from the server."""
        self._delete()
    def find(self, ForceApplyQTConfig=None, InputParameters=None, Mode=None, Name=None):
        """Find matching resources on the server (parameters are server-side regexes)."""
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieve the single rfc3918ipmcMinMaxLat instance identified by href."""
        return self._read(href)
    def Apply(self, *args, **kwargs):
        """Execute the 'apply' operation on the server (applies this Quick Test)."""
        # Arg1 is the resource href; positional args become Arg2..ArgN.
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('apply', payload=payload, response_object=None)
    def ApplyAsync(self, *args, **kwargs):
        """Execute the 'applyAsync' operation on the server."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyAsync', payload=payload, response_object=None)
    def ApplyAsyncResult(self, *args, **kwargs):
        """Execute the 'applyAsyncResult' operation on the server; returns a bool."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyAsyncResult', payload=payload, response_object=None)
    def ApplyITWizardConfiguration(self, *args, **kwargs):
        """Execute the 'applyITWizardConfiguration' operation on the server."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
    def GenerateReport(self, *args, **kwargs):
        """Execute the 'generateReport' operation (PDF report for the last run)."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('generateReport', payload=payload, response_object=None)
    def Run(self, *args, **kwargs):
        """Execute the 'run' operation: start the Quick Test and wait for the result list."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('run', payload=payload, response_object=None)
    def Start(self, *args, **kwargs):
        """Execute the 'start' operation: start the Quick Test without waiting."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('start', payload=payload, response_object=None)
    def Stop(self, *args, **kwargs):
        """Execute the 'stop' operation: stop the currently running Quick Test."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('stop', payload=payload, response_object=None)
    def WaitForTest(self, *args, **kwargs):
        """Execute the 'waitForTest' operation: wait for completion and return results."""
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('waitForTest', payload=payload, response_object=None)
| true | true |
1c32d61fcb9418e9dcc63acc39db3553e81a2963 | 12,641 | py | Python | models/main.py | PKUxxz/simp-detr | c83846b1b6fc0e396e268dcfef278e162cf231c5 | [
"Apache-2.0"
] | null | null | null | models/main.py | PKUxxz/simp-detr | c83846b1b6fc0e396e268dcfef278e162cf231c5 | [
"Apache-2.0"
] | null | null | null | models/main.py | PKUxxz/simp-detr | c83846b1b6fc0e396e268dcfef278e162cf231c5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import sys
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
sys.path.append("../../../lib")
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from detr import build
from engine import evaluate, train_one_epoch
def get_args_parser():
    """Build the argparse parser holding every DETR training/eval option.

    ``add_help`` is disabled so this parser can be used as a *parent* parser
    by the script entry point.
    """
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    add = parser.add_argument
    # Optimization schedule.
    add('--lr', default=1e-4, type=float)
    add('--lr_backbone', default=1e-5, type=float)
    add('--batch_size', default=2, type=int)
    add('--weight_decay', default=1e-4, type=float)
    add('--epochs', default=50, type=int)
    add('--lr_drop', default=45, type=int)
    add('--clip_max_norm', default=0.1, type=float,
        help='gradient clipping max norm')
    # Model parameters.
    add('--frozen_weights', type=str, default=None,
        help="Path to the pretrained model. If set, only the mask head will be trained")
    # Backbone.
    add('--backbone', default='resnet50', type=str,
        help="Name of the convolutional backbone to use")
    add('--dilation', action='store_true',
        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    add('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
        help="Type of positional embedding to use on top of the image features")
    # Transformer.
    add('--enc_layers', default=6, type=int,
        help="Number of encoding layers in the transformer")
    add('--dec_layers', default=6, type=int,
        help="Number of decoding layers in the transformer")
    add('--dim_feedforward', default=2048, type=int,
        help="Intermediate size of the feedforward layers in the transformer blocks")
    add('--hidden_dim', default=256, type=int,
        help="Size of the embeddings (dimension of the transformer)")
    add('--dropout', default=0.1, type=float,
        help="Dropout applied in the transformer")
    add('--nheads', default=8, type=int,
        help="Number of attention heads inside the transformer's attentions")
    add('--num_queries', default=100, type=int,
        help="Number of query slots")
    add('--pre_norm', action='store_true')
    # Segmentation.
    add('--masks', action='store_true',
        help="Train segmentation head if the flag is provided")
    # Loss (note: flag *disables* aux loss, the attribute default is True).
    add('--no_aux_loss', dest='aux_loss', action='store_false',
        help="Disables auxiliary decoding losses (loss at each layer)")
    # Matcher costs.
    add('--set_cost_class', default=1, type=float,
        help="Class coefficient in the matching cost")
    add('--set_cost_bbox', default=5, type=float,
        help="L1 box coefficient in the matching cost")
    add('--set_cost_giou', default=2, type=float,
        help="giou box coefficient in the matching cost")
    # Loss coefficients.
    add('--mask_loss_coef', default=1, type=float)
    add('--dice_loss_coef', default=1, type=float)
    add('--bbox_loss_coef', default=5, type=float)
    add('--giou_loss_coef', default=2, type=float)
    add('--eos_coef', default=0.1, type=float,
        help="Relative classification weight of the no-object class")
    # Dataset parameters.
    add('--dataset_file', default='coco')
    add('--coco_path', type=str)
    add('--coco_panoptic_path', type=str)
    add('--remove_difficult', action='store_true')
    add('--output_dir', default='train_log',
        help='path where to save, empty for no saving')
    add('--device', default='cuda',
        help='device to use for training / testing')
    add('--seed', default=42, type=int)
    add('--resume', default='', help='resume from checkpoint')
    add('--start_epoch', default=0, type=int, metavar='N',
        help='start epoch')
    add('--eval', action='store_true')
    add('--num_workers', default=2, type=int)
    # Distributed training parameters.
    add('--world_size', default=1, type=int,
        help='number of distributed processes')
    add('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def main(args):
    """Train (or, with ``--eval``, only evaluate) DETR using the parsed *args*."""
    # Set up torch.distributed when launched with multiple processes; this also
    # fills in args.distributed / args.gpu used below.
    utils.init_distributed_mode(args)
    print("git:\n {}\n".format(utils.get_sha()))
    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    # (offset by rank so every distributed process draws different numbers)
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model, criterion, postprocessors = build(args)
    model.to(device)
    # Keep a handle on the bare module so checkpoints are saved without the
    # DistributedDataParallel wrapper prefix.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # Two parameter groups: the backbone trains with its own (lower) learning
    # rate args.lr_backbone, everything else with args.lr.
    param_dicts = [
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
                                  weight_decay=args.weight_decay)
    # Single step decay of the learning rate after args.lr_drop epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)
    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)
    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn, num_workers=args.num_workers)
    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)
    if args.frozen_weights is not None:
        # Load the frozen detector weights; only the mask head will be trained.
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    # Resolve the output directory.  The default "train_log" is redirected to a
    # shared .../output/<user>/<experiment> tree and symlinked back here.
    if args.output_dir == "train_log":
        local_dir = Path.cwd()
        *_, user, experiment = local_dir.parts
        output_dir = local_dir.parents[2] / "output" / user / experiment
    else:
        output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    link_dir = Path("train_log")
    if utils.is_main_process():
        # Refresh the local "train_log" symlink so it points at output_dir.
        if link_dir.is_symlink():
            link_dir.unlink()
        link_dir.symlink_to(output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.resume, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        # Restore optimizer/scheduler state only when training resumes (not in
        # pure evaluation mode).
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1
    if args.eval:
        # Evaluation-only mode: one pass over the validation set, then exit.
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device, args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
        return
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the distributed sampler differently every epoch.
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch,
            args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 100 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch,
                    'args': args,
                }, checkpoint_path)
        # Validate after every epoch.
        test_stats, coco_evaluator = evaluate(
            model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
        )
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.output_dir and utils.is_main_process():
            # One JSON object per epoch, appended to log.txt.
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            # for evaluation logs
            if coco_evaluator is not None:
                (output_dir / 'eval').mkdir(exist_ok=True)
                if "bbox" in coco_evaluator.coco_eval:
                    # Save both a rolling "latest" dump and a per-epoch dump of
                    # the raw COCO eval tensors.
                    filenames = ['latest.pth']
                    filenames.append(f'{epoch:03}.pth')
                    for name in filenames:
                        torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                   output_dir / "eval" / name)
                    # Append a human-readable COCO summary for this epoch;
                    # start the file fresh when training begins at epoch 0.
                    result_file = (output_dir / "eval" / "result.txt")
                    if epoch == 0 and result_file.exists():
                        result_file.unlink()
                    eval_result = utils.get_summary(coco_evaluator.coco_eval["bbox"])
                    with result_file.open("a") as f:
                        f.write(f"Evaluation result of Epoch {epoch:03}:\n")
                        f.write(f"-------------------------------------------------------------------------------\n")
                        f.write(eval_result)
                        f.write(f"-------------------------------------------------------------------------------\n\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Wrap the option parser from get_args_parser() as a parent so this script
    # adds only the description (and --help) on top of the shared options.
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    main(args)
| 46.304029 | 119 | 0.624159 |
import argparse
import datetime
import json
import random
import sys
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
sys.path.append("../../../lib")
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from detr import build
from engine import evaluate, train_one_epoch
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=50, type=int)
parser.add_argument('--lr_drop', default=45, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='train_log',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
if args.output_dir == "train_log":
local_dir = Path.cwd()
*_, user, experiment = local_dir.parts
output_dir = local_dir.parents[2] / "output" / user / experiment
else:
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
link_dir = Path("train_log")
if utils.is_main_process():
if link_dir.is_symlink():
link_dir.unlink()
link_dir.symlink_to(output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
data_loader_val, base_ds, device, args.output_dir)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch,
args.clip_max_norm)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
test_stats, coco_evaluator = evaluate(
model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
(output_dir / 'eval').mkdir(exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ['latest.pth']
# if epoch % 50 == 0:
# filenames.append(f'{epoch:03}.pth')
filenames.append(f'{epoch:03}.pth')
for name in filenames:
torch.save(coco_evaluator.coco_eval["bbox"].eval,
output_dir / "eval" / name)
result_file = (output_dir / "eval" / "result.txt")
if epoch == 0 and result_file.exists():
result_file.unlink()
eval_result = utils.get_summary(coco_evaluator.coco_eval["bbox"])
with result_file.open("a") as f:
f.write(f"Evaluation result of Epoch {epoch:03}:\n")
f.write(f"-------------------------------------------------------------------------------\n")
f.write(eval_result)
f.write(f"-------------------------------------------------------------------------------\n\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
main(args)
| true | true |
1c32d66bea5628ee2e303da88010cebf2114cd34 | 5,646 | py | Python | tests/test_rdscluster.py | agiza/cloud-custodian | 9592d0f700970e05e89574280cfe0e1479c95228 | [
"Apache-2.0"
] | null | null | null | tests/test_rdscluster.py | agiza/cloud-custodian | 9592d0f700970e05e89574280cfe0e1479c95228 | [
"Apache-2.0"
] | null | null | null | tests/test_rdscluster.py | agiza/cloud-custodian | 9592d0f700970e05e89574280cfe0e1479c95228 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources import rdscluster
class RDSClusterTest(BaseTest):
    """Exercise the rds-cluster resource: queries, filters and actions."""

    def _run_policy(self, flight_data, policy):
        # Replay the recorded AWS API responses and execute the policy.
        factory = self.replay_flight_data(flight_data)
        return self.load_policy(policy, session_factory=factory).run()

    def test_rdscluster_simple(self):
        resources = self._run_policy(
            'test_rdscluster_simple',
            {'name': 'rdscluster-simple', 'resource': 'rds-cluster'})
        self.assertEqual(len(resources), 2)

    def test_rdscluster_simple_filter(self):
        resources = self._run_policy(
            'test_rdscluster_simple',
            {'name': 'rdscluster-simple-filter',
             'resource': 'rds-cluster',
             'filters': [{'type': 'value',
                          'key': 'DBClusterIdentifier',
                          'value': 'bbb'}]})
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete(self):
        resources = self._run_policy(
            'test_rdscluster_delete',
            {'name': 'rdscluster-delete',
             'resource': 'rds-cluster',
             'filters': [{'type': 'value',
                          'key': 'DBClusterIdentifier',
                          'value': 'bbb'}],
             'actions': [{'type': 'delete',
                          'delete-instances': False}]})
        self.assertEqual(len(resources), 1)

    def test_rdscluster_delete_with_instances(self):
        resources = self._run_policy(
            'test_rdscluster_delete_with_instances',
            {'name': 'rdscluster-delete',
             'resource': 'rds-cluster',
             'filters': [{'type': 'value',
                          'key': 'DBClusterIdentifier',
                          'value': 'bbb'}],
             'actions': [{'type': 'delete',
                          'delete-instances': True}]})
        self.assertEqual(len(resources), 1)

    def test_rdscluster_retention(self):
        resources = self._run_policy(
            'test_rdscluster_retention',
            {'name': 'rdscluster-delete',
             'resource': 'rds-cluster',
             'filters': [{'type': 'value',
                          'key': 'DBClusterIdentifier',
                          'value': 'bbb'}],
             'actions': [{'type': 'retention', 'days': 21}]})
        self.assertEqual(len(resources), 1)

    def test_rdscluster_snapshot(self):
        resources = self._run_policy(
            'test_rdscluster_snapshot',
            {'name': 'rdscluster-snapshot',
             'resource': 'rds-cluster',
             'filters': [{'type': 'value',
                          'key': 'DBClusterIdentifier',
                          'value': 'bbb'}],
             'actions': [{'type': 'snapshot'}]})
        self.assertEqual(len(resources), 1)
class RDSClusterSnapshotTest(BaseTest):
    """Exercise the rds-cluster-snapshot resource: queries, filters, delete."""

    def _run_policy(self, flight_data, policy):
        # Replay the recorded AWS API responses and execute the policy.
        factory = self.replay_flight_data(flight_data)
        return self.load_policy(policy, session_factory=factory).run()

    def test_rdscluster_snapshot_simple(self):
        resources = self._run_policy(
            'test_rdscluster_snapshot_simple',
            {'name': 'rdscluster-snapshot-simple',
             'resource': 'rds-cluster-snapshot'})
        self.assertEqual(len(resources), 2)

    def test_rdscluster_snapshot_simple_filter(self):
        resources = self._run_policy(
            'test_rdscluster_snapshot_simple',
            {'name': 'rdscluster-snapshot-simple-filter',
             'resource': 'rds-cluster-snapshot',
             'filters': [{'type': 'value',
                          'key': 'StorageEncrypted',
                          'value': False}]})
        self.assertEqual(len(resources), 1)

    def test_rdscluster_snapshot_age_filter(self):
        resources = self._run_policy(
            'test_rdscluster_snapshot_simple',
            {'name': 'rdscluster-snapshot-age-filter',
             'resource': 'rds-cluster-snapshot',
             'filters': [{'type': 'age', 'days': 7}]})
        self.assertEqual(len(resources), 2)

    def test_rdscluster_snapshot_trim(self):
        resources = self._run_policy(
            'test_rdscluster_snapshot_delete',
            {'name': 'rdscluster-snapshot-trim',
             'resource': 'rds-cluster-snapshot',
             'actions': ['delete']})
        self.assertEqual(len(resources), 2)
| 38.148649 | 90 | 0.591038 |
from common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources import rdscluster
class RDSClusterTest(BaseTest):
def test_rdscluster_simple(self):
session_factory = self.replay_flight_data('test_rdscluster_simple')
p = self.load_policy({
'name': 'rdscluster-simple',
'resource': 'rds-cluster'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_rdscluster_simple_filter(self):
session_factory = self.replay_flight_data('test_rdscluster_simple')
p = self.load_policy({
'name': 'rdscluster-simple-filter',
'resource': 'rds-cluster',
'filters': [
{'type': 'value',
'key': 'DBClusterIdentifier',
'value': 'bbb'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rdscluster_delete(self):
session_factory = self.replay_flight_data('test_rdscluster_delete')
p = self.load_policy({
'name': 'rdscluster-delete',
'resource': 'rds-cluster',
'filters': [
{'type': 'value',
'key': 'DBClusterIdentifier',
'value': 'bbb'}],
'actions': [
{'type': 'delete',
'delete-instances': False}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rdscluster_delete_with_instances(self):
session_factory = self.replay_flight_data('test_rdscluster_delete_with_instances')
p = self.load_policy({
'name': 'rdscluster-delete',
'resource': 'rds-cluster',
'filters': [
{'type': 'value',
'key': 'DBClusterIdentifier',
'value': 'bbb'}],
'actions': [
{'type': 'delete',
'delete-instances': True}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rdscluster_retention(self):
session_factory = self.replay_flight_data('test_rdscluster_retention')
p = self.load_policy({
'name': 'rdscluster-delete',
'resource': 'rds-cluster',
'filters': [
{'type': 'value',
'key': 'DBClusterIdentifier',
'value': 'bbb'}],
'actions': [{'type': 'retention', 'days': 21}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rdscluster_snapshot(self):
session_factory = self.replay_flight_data('test_rdscluster_snapshot')
p = self.load_policy({
'name': 'rdscluster-snapshot',
'resource': 'rds-cluster',
'filters': [
{'type': 'value',
'key': 'DBClusterIdentifier',
'value': 'bbb'}],
'actions': [{'type': 'snapshot'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
class RDSClusterSnapshotTest(BaseTest):
def test_rdscluster_snapshot_simple(self):
session_factory = self.replay_flight_data('test_rdscluster_snapshot_simple')
p = self.load_policy({
'name': 'rdscluster-snapshot-simple',
'resource': 'rds-cluster-snapshot'},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_rdscluster_snapshot_simple_filter(self):
session_factory = self.replay_flight_data('test_rdscluster_snapshot_simple')
p = self.load_policy({
'name': 'rdscluster-snapshot-simple-filter',
'resource': 'rds-cluster-snapshot',
'filters': [
{'type': 'value',
'key': 'StorageEncrypted',
'value': False}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_rdscluster_snapshot_age_filter(self):
factory = self.replay_flight_data('test_rdscluster_snapshot_simple')
p = self.load_policy({
'name': 'rdscluster-snapshot-age-filter',
'resource': 'rds-cluster-snapshot',
'filters': [{'type': 'age', 'days': 7}]},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_rdscluster_snapshot_trim(self):
factory = self.replay_flight_data('test_rdscluster_snapshot_delete')
p = self.load_policy({
'name': 'rdscluster-snapshot-trim',
'resource': 'rds-cluster-snapshot',
'actions': ['delete']},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
| true | true |
1c32d68f3c087086e1183822232971de969a5f6a | 1,359 | py | Python | _bin/java2smali.py | HongxuChen/dotfiles | 4a51a74ae3345273514faccf40a47e1a39048049 | [
"MIT"
] | 13 | 2015-09-11T14:53:06.000Z | 2021-12-19T23:07:06.000Z | _bin/java2smali.py | HongxuChen/dotfiles | 4a51a74ae3345273514faccf40a47e1a39048049 | [
"MIT"
] | 5 | 2015-03-18T17:08:01.000Z | 2020-01-07T08:51:47.000Z | _bin/java2smali.py | HongxuChen/dotfiles | 4a51a74ae3345273514faccf40a47e1a39048049 | [
"MIT"
] | 4 | 2016-12-27T14:52:19.000Z | 2019-05-16T07:07:53.000Z | #!/usr/bin/env python3
from __future__ import print_function
import os
import shutil
import subprocess
version = "1.6"  # javac -source/-target compatibility level
name = "_java2smali"  # base name for the generated jar/dex and apktool output dir
dex_type = "apk"  # extension given to the dx output so apktool will decode it
def require(cond, msg):
    """Raise an Exception carrying *msg* unless *cond* is truthy."""
    if cond:
        return
    raise Exception(msg)
def run(cmd):
    """Echo *cmd* and execute it as a subprocess (whitespace-tokenized)."""
    print("=> {}".format(cmd))
    argv = cmd.split()
    subprocess.call(argv)
def java2jar(d):
    """Compile every .java file in *d* and bundle the .class output into a jar.

    Returns the jar file name; intermediate .class files are removed.
    Relies on `javac` and `jar` being on PATH.
    """
    sources = [f for f in os.listdir(d) if f.endswith(".java")]
    for src in sources:
        run("javac -source {} -target {} {}".format(version, version, src))
    classes = [f for f in os.listdir(d) if f.endswith(".class")]
    jar_file = name + ".jar"
    run("jar cvf {} {}".format(jar_file, " ".join(classes)))
    require(os.path.exists(jar_file), "{} not generated".format(jar_file))
    for cls in classes:
        os.remove(cls)
    return jar_file
def jar2smali(jar_file):
    """Convert *jar_file* to dex with `dx`, then decompile it with `apktool`.

    The intermediate jar and dex files are deleted once consumed.
    """
    dex_file = name + "." + dex_type
    run("dx --dex --output={} {}".format(dex_file, jar_file))
    require(os.path.exists(dex_file), "{} not generated".format(dex_file))
    os.remove(jar_file)
    run("apktool -f d {}".format(dex_file))
    os.remove(dex_file)
def tear_up():
    # Remove any leftover apktool output directory from a previous run.
    shutil.rmtree(name, ignore_errors=True)
def java2smali(d):
    """End-to-end pipeline: .java sources in *d* -> jar -> dex -> smali."""
    tear_up()
    jar2smali(java2jar(d))
if __name__ == '__main__':
    # Convert the Java sources found in the current working directory.
    java2smali(os.curdir)
| 21.234375 | 74 | 0.629139 |
from __future__ import print_function
import os
import shutil
import subprocess
version = "1.6"
name = "_java2smali"
dex_type = "apk"
def require(cond, msg):
if not cond:
raise Exception(msg)
def run(cmd):
print("=> {}".format(cmd))
subprocess.call(cmd.split())
def java2jar(d):
java_files = [f for f in os.listdir(d) if f.endswith(".java")]
for f in java_files:
cmd = "javac -source {} -target {} {}".format(version, version, f)
run(cmd)
class_files = [f for f in os.listdir(d) if f.endswith(".class")]
jar_file = name + ".jar"
cmd = "jar cvf {} {}".format(jar_file, " ".join(class_files))
run(cmd)
require(os.path.exists(jar_file), "{} not generated".format(jar_file))
for f in class_files:
os.remove(f)
return jar_file
def jar2smali(jar_file):
dex_file = name + "." + dex_type
cmd = "dx --dex --output={} {}".format(dex_file, jar_file)
run(cmd)
require(os.path.exists(dex_file), "{} not generated".format(dex_file))
os.remove(jar_file)
cmd = "apktool -f d {}".format(dex_file)
run(cmd)
os.remove(dex_file)
def tear_up():
    """Delete the previous output directory, tolerating its absence."""
    shutil.rmtree(name, ignore_errors=True)
def java2smali(d):
    """Run the full pipeline (clean, compile, disassemble) on directory *d*."""
    tear_up()
    jar2smali(java2jar(d))
if __name__ == '__main__':
    java2smali(os.curdir)
| true | true |
1c32d71d45f237e8444736f93738b7e89ba16125 | 1,225 | py | Python | setup.py | dreamkeep/hbp-neuromorphic-client | e4268321e12d6bb0da600ed977d088317b7c47d9 | [
"Apache-2.0"
] | 1 | 2019-12-24T04:29:11.000Z | 2019-12-24T04:29:11.000Z | setup.py | dreamkeep/hbp-neuromorphic-client | e4268321e12d6bb0da600ed977d088317b7c47d9 | [
"Apache-2.0"
] | null | null | null | setup.py | dreamkeep/hbp-neuromorphic-client | e4268321e12d6bb0da600ed977d088317b7c47d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for the hbp_neuromorphic_platform client library.
from distutils.core import setup
# Read the README for the long description; a context manager closes the
# file handle deterministically instead of leaking it until GC (the
# original bare open(...).read() never closed the file).
with open("README.md") as _readme:
    long_description = _readme.read()
setup(
    name="hbp_neuromorphic_platform",
    version='0.7.2',
    packages=['nmpi'],
    install_requires=['requests',],
    author="Andrew P. Davison and Domenico Guarino",
    author_email="andrew.davison@unic.cnrs-gif.fr",
    description="Client software for the Human Brain Project Neuromorphic Computing Platform",
    long_description=long_description,
    license="License :: OSI Approved :: Apache Software License",
    url='http://www.humanbrainproject.eu',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: Other/Proprietary License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering']
)
| 36.029412 | 94 | 0.638367 |
from distutils.core import setup
long_description = open("README.md").read()
setup(
name="hbp_neuromorphic_platform",
version='0.7.2',
packages=['nmpi'],
install_requires=['requests',],
author="Andrew P. Davison and Domenico Guarino",
author_email="andrew.davison@unic.cnrs-gif.fr",
description="Client software for the Human Brain Project Neuromorphic Computing Platform",
long_description=long_description,
license="License :: OSI Approved :: Apache Software License",
url='http://www.humanbrainproject.eu',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: Other/Proprietary License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering']
)
| true | true |
1c32d7a8998db158d1459d6bbf46c622488c673c | 1,185 | py | Python | setup.py | woolfson-group/array_sensing | ae9e0c7d9137e4034d183ebc920b7ee6352ebc93 | [
"MIT"
] | null | null | null | setup.py | woolfson-group/array_sensing | ae9e0c7d9137e4034d183ebc920b7ee6352ebc93 | [
"MIT"
] | null | null | null | setup.py | woolfson-group/array_sensing | ae9e0c7d9137e4034d183ebc920b7ee6352ebc93 | [
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# Use the README as the PyPI long description.
with open('README.md', 'r') as f:
    long_description = f.read()
# Distribution metadata for the array_sensing (BADASS_ML) package.
setup(
    name='array_sensing',
    packages=find_packages(),
    version='0.1.0',
    description=('BADASS_ML: A package to parse the input data and run ML '
                 'on fluorescence data collected from the BADASS (Barrel '
                 'Array Diagnostics And SenSing) technology'),
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=('Kathryn Shelley, Chris Wells Wood and Will Dawson in the lab '
            'of Professor Dek Woolfson, University of Bristol'),
    author_email='kathryn.l.shelley@gmail.com',
    url='https://github.com/woolfson-group/array_sensing',
    license='MIT',
    keywords=['badass', 'array sensing', 'machine learning'],
    install_requires=['imblearn', 'jinja2', 'numpy', 'matplotlib', 'mlxtend',
                      'openpyxl', 'pandas', 'scikit-learn', 'scipy', 'seaborn',
                      'xlrd'],
    classifiers=['Programming Language :: Python'],
    # Supported on Python 2.x only implicitly; explicitly excludes 3.0-3.5.
    python_requires=('!=2.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, '
                     '!=3.5.*, <4')
)
| 40.862069 | 79 | 0.611814 |
from setuptools import setup, find_packages
with open('README.md', 'r') as f:
long_description = f.read()
setup(
name='array_sensing',
packages=find_packages(),
version='0.1.0',
description=('BADASS_ML: A package to parse the input data and run ML '
'on fluorescence data collected from the BADASS (Barrel '
'Array Diagnostics And SenSing) technology'),
long_description=long_description,
long_description_content_type='text/markdown',
author=('Kathryn Shelley, Chris Wells Wood and Will Dawson in the lab '
'of Professor Dek Woolfson, University of Bristol'),
author_email='kathryn.l.shelley@gmail.com',
url='https://github.com/woolfson-group/array_sensing',
license='MIT',
keywords=['badass', 'array sensing', 'machine learning'],
install_requires=['imblearn', 'jinja2', 'numpy', 'matplotlib', 'mlxtend',
'openpyxl', 'pandas', 'scikit-learn', 'scipy', 'seaborn',
'xlrd'],
classifiers=['Programming Language :: Python'],
python_requires=('!=2.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, '
'!=3.5.*, <4')
)
| true | true |
1c32d7f72ce35c184dd89232c5ca0cace531ab3f | 1,034 | py | Python | efilter_tests/unit/protocols/applicative.py | Onager/dotty | b145131499be0c4b755fc2e2ac19be11a50bce6a | [
"Apache-2.0"
] | 54 | 2015-08-02T14:26:50.000Z | 2021-10-21T02:44:25.000Z | efilter_tests/unit/protocols/applicative.py | Onager/dotty | b145131499be0c4b755fc2e2ac19be11a50bce6a | [
"Apache-2.0"
] | 23 | 2015-11-27T10:08:58.000Z | 2017-09-27T08:54:56.000Z | efilter_tests/unit/protocols/applicative.py | Onager/dotty | b145131499be0c4b755fc2e2ac19be11a50bce6a | [
"Apache-2.0"
] | 16 | 2015-08-14T10:11:20.000Z | 2021-10-21T02:44:18.000Z | # EFILTER Forensic Query Language
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EFILTER test suite.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
import unittest
from efilter_tests import mocks
from efilter.protocols import applicative
# pylint: disable=blacklisted-name
class ApplicativeTest(unittest.TestCase):
    """Exercises the applicative protocol's apply() dispatch."""

    def testApplyingFunction(self):
        func = mocks.MockFunction()
        args = [10]
        kwargs = {"y": 20}
        self.assertEqual(applicative.apply(func, args, kwargs), 200)
| 27.945946 | 74 | 0.752418 |
__author__ = "Adam Sindelar <adamsh@google.com>"
import unittest
from efilter_tests import mocks
from efilter.protocols import applicative
class ApplicativeTest(unittest.TestCase):
def testApplyingFunction(self):
result = applicative.apply(mocks.MockFunction(), [10], dict(y=20))
self.assertEqual(result, 200)
| true | true |
1c32d80c636c7007f06dbb384a20862f86d2b290 | 8,945 | py | Python | tests/test_segmentation.py | goitom/eemeter | bb05d5b776546858f8f3a8d3a95bec202728d9f0 | [
"Apache-2.0"
] | null | null | null | tests/test_segmentation.py | goitom/eemeter | bb05d5b776546858f8f3a8d3a95bec202728d9f0 | [
"Apache-2.0"
] | null | null | null | tests/test_segmentation.py | goitom/eemeter | bb05d5b776546858f8f3a8d3a95bec202728d9f0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2018 Open Energy Efficiency, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.segmentation import (
CalTRACKSegmentModel,
SegmentedModel,
segment_time_series,
iterate_segmented_dataset,
)
@pytest.fixture
def index_8760():
    """One year of hourly UTC timestamps (365 * 24 = 8760 entries)."""
    return pd.date_range("2017-01-01", periods=365 * 24, freq="H", tz="UTC")
def test_segment_time_series_invalid_type(index_8760):
    """An unrecognized segment_type must be rejected with ValueError."""
    with pytest.raises(ValueError):
        segment_time_series(index_8760, segment_type="unknown")
def test_segment_time_series_single(index_8760):
    """'single' segmentation yields one 'all' segment covering every hour."""
    weights = segment_time_series(index_8760, segment_type="single")
    assert weights.shape == (8760, 1)
    assert list(weights.columns) == ["all"]
    assert weights.sum().sum() == 8760.0
def test_segment_time_series_one_month(index_8760):
    """'one_month' yields 12 disjoint month columns; total weight equals
    the number of hours in the year (8760)."""
    weights = segment_time_series(index_8760, segment_type="one_month")
    assert list(weights.columns) == [
        "jan",
        "feb",
        "mar",
        "apr",
        "may",
        "jun",
        "jul",
        "aug",
        "sep",
        "oct",
        "nov",
        "dec",
    ]
    assert weights.shape == (8760, 12)
    assert weights.sum().sum() == 8760.0
def test_segment_time_series_three_month(index_8760):
    """'three_month' segments overlap: each hour falls into three
    segments, so the total weight is 3 * 8760 = 26280."""
    weights = segment_time_series(index_8760, segment_type="three_month")
    assert list(weights.columns) == [
        "dec-jan-feb",
        "jan-feb-mar",
        "feb-mar-apr",
        "mar-apr-may",
        "apr-may-jun",
        "may-jun-jul",
        "jun-jul-aug",
        "jul-aug-sep",
        "aug-sep-oct",
        "sep-oct-nov",
        "oct-nov-dec",
        "nov-dec-jan",
    ]
    assert weights.shape == (8760, 12)
    assert weights.sum().sum() == 26280.0
def test_segment_time_series_three_month_weighted(index_8760):
    """Weighted three-month segments sum to 2 * 8760 = 17520 overall
    (presumably full weight on the center month, reduced weight on the
    shoulder months -- see segment_time_series for the exact scheme)."""
    weights = segment_time_series(index_8760, segment_type="three_month_weighted")
    assert list(weights.columns) == [
        "dec-jan-feb-weighted",
        "jan-feb-mar-weighted",
        "feb-mar-apr-weighted",
        "mar-apr-may-weighted",
        "apr-may-jun-weighted",
        "may-jun-jul-weighted",
        "jun-jul-aug-weighted",
        "jul-aug-sep-weighted",
        "aug-sep-oct-weighted",
        "sep-oct-nov-weighted",
        "oct-nov-dec-weighted",
        "nov-dec-jan-weighted",
    ]
    assert weights.shape == (8760, 12)
    assert weights.sum().sum() == 17520.0
def test_segment_time_series_drop_zero_weight_segments(index_8760):
    """Segments with no coverage are dropped when requested: the first
    100 hours of the year touch only January."""
    first_100_hours = index_8760[:100]
    weights = segment_time_series(
        first_100_hours, segment_type="one_month", drop_zero_weight_segments=True
    )
    assert weights.shape == (100, 1)
    assert list(weights.columns) == ["jan"]
    assert weights.sum().sum() == 100.0
@pytest.fixture
def dataset():
    """1000 hourly rows with constant columns a=1 and b=2."""
    index = pd.date_range("2017-01-01", periods=1000, freq="H", tz="UTC")
    return pd.DataFrame({"a": 1, "b": 2}, index=index, columns=["a", "b"])
def test_iterate_segmented_dataset_no_segmentation(dataset):
    """Without a segmentation, iteration yields a single unnamed segment
    containing the whole dataset plus a unit 'weight' column."""
    iterator = iterate_segmented_dataset(dataset, segmentation=None)
    segment_name, data = next(iterator)
    assert segment_name is None
    assert list(data.columns) == ["a", "b", "weight"]
    assert data.shape == (1000, 3)
    assert data.sum().sum() == 4000
    # Exactly one segment is produced.
    with pytest.raises(StopIteration):
        next(iterator)
@pytest.fixture
def segmentation(dataset):
    """One-month segmentation weights built from the dataset's index."""
    return segment_time_series(dataset.index, segment_type="one_month")
def test_iterate_segmented_dataset_with_segmentation(dataset, segmentation):
    """Iteration honors segment boundaries: 1000 hourly rows starting
    2017-01-01 span all of January (744 h), part of February (256 h),
    and none of March."""
    iterator = iterate_segmented_dataset(dataset, segmentation=segmentation)
    segment_name, data = next(iterator)
    assert segment_name == "jan"
    assert list(data.columns) == ["a", "b", "weight"]
    assert data.shape == (744, 3)
    assert data.sum().sum() == 2976.0
    segment_name, data = next(iterator)
    assert segment_name == "feb"
    assert list(data.columns) == ["a", "b", "weight"]
    assert data.shape == (256, 3)
    assert data.sum().sum() == 1024.0
    segment_name, data = next(iterator)
    assert segment_name == "mar"
    assert list(data.columns) == ["a", "b", "weight"]
    # March has no rows; the segment is emitted but empty.
    assert data.shape == (0, 3)
    assert data.sum().sum() == 0.0
def test_iterate_segmented_dataset_with_processor(dataset, segmentation):
    """A feature_processor is applied per segment with its kwargs, and
    segment names are translated through the name mapping before being
    passed to the processor (while the yielded names stay unmapped)."""
    # Records the (mapped) segment names the processor was called with.
    feature_processor_segment_names = []
    def feature_processor(
        segment_name, dataset, column_mapping=None
    ):  # rename some columns
        feature_processor_segment_names.append(segment_name)
        return dataset.rename(columns=column_mapping).assign(weight=1)
    iterator = iterate_segmented_dataset(
        dataset,
        segmentation=segmentation,
        feature_processor=feature_processor,
        feature_processor_kwargs={"column_mapping": {"a": "c", "b": "d"}},
        feature_processor_segment_name_mapping={"jan": "jan2", "feb": "feb2"},
    )
    segment_name, data = next(iterator)
    assert feature_processor_segment_names == ["jan2"]
    assert segment_name == "jan"
    assert list(data.columns) == ["c", "d", "weight"]
    assert data.shape == (1000, 3)
    assert data.sum().sum() == 4000.0
    segment_name, data = next(iterator)
    assert feature_processor_segment_names == ["jan2", "feb2"]
    assert segment_name == "feb"
    assert list(data.columns) == ["c", "d", "weight"]
    assert data.shape == (1000, 3)
    assert data.sum().sum() == 4000.0
def test_segment_model():
    """A CalTRACKSegmentModel predicts from its stored coefficients:
    with both coefficients 1 each of the 2 rows contributes 1 + 1 = 2."""
    segment_model = CalTRACKSegmentModel(
        segment_name="segment",
        model=None,
        formula="meter_value ~ C(hour_of_week) + a - 1",
        model_params={"C(hour_of_week)[1]": 1, "a": 1},
        warnings=None,
    )
    index = pd.date_range("2017-01-01", periods=2, freq="H", tz="UTC")
    data = pd.DataFrame({"a": [1, 1], "hour_of_week": [1, 1]}, index=index)
    prediction = segment_model.predict(data)
    assert prediction.sum() == 4
def test_segmented_model():
    """A SegmentedModel predicts only where it has a segment model:
    here only January is modeled, so February-and-later rows contribute
    nothing (Jan has 744 hours x prediction 2 = 1488)."""
    segment_model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ C(hour_of_week) + a- 1",
        model_params={"C(hour_of_week)[1]": 1, "a": 1},
        warnings=None,
    )
    # Produces constant features so each covered hour predicts 1 + 1 = 2.
    def fake_feature_processor(segment_name, segment_data):
        return pd.DataFrame(
            {"hour_of_week": 1, "a": 1, "weight": segment_data.weight},
            index=segment_data.index,
        )
    segmented_model = SegmentedModel(
        segment_models=[segment_model],
        prediction_segment_type="one_month",
        prediction_segment_name_mapping=None,
        prediction_feature_processor=fake_feature_processor,
        prediction_feature_processor_kwargs=None,
    )
    # make this cover jan and feb but only supply jan model
    index = pd.date_range("2017-01-01", periods=24 * 50, freq="H", tz="UTC")
    temps = pd.Series(np.linspace(0, 100, 24 * 50), index=index)
    prediction = segmented_model.predict(temps.index, temps).result.predicted_usage
    assert prediction.sum() == 1488.0
def test_segment_model_serialized():
    """The JSON form of a segment model exposes formula, params and
    warnings, and is serializable with json.dumps."""
    model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ a + b - 1",
        model_params={"a": 1, "b": 1},
        warnings=None,
    )
    serialized = model.json()
    assert serialized["formula"] == "meter_value ~ a + b - 1"
    assert serialized["model_params"] == {"a": 1, "b": 1}
    assert serialized["warnings"] == []
    assert json.dumps(serialized)
def test_segmented_model_serialized():
    """SegmentedModel.json() records the segment type and the feature
    processor's name, and the result is json.dumps-serializable."""
    segment_model = CalTRACKSegmentModel(
        segment_name="jan",
        model=None,
        formula="meter_value ~ a + b - 1",
        model_params={"a": 1, "b": 1},
        warnings=None,
    )
    # Never invoked here (serialization only), hence the no-cover pragma.
    def fake_feature_processor(segment_name, segment_data):  # pragma: no cover
        return pd.DataFrame(
            {"a": 1, "b": 1, "weight": segment_data.weight}, index=segment_data.index
        )
    segmented_model = SegmentedModel(
        segment_models=[segment_model],
        prediction_segment_type="one_month",
        prediction_segment_name_mapping=None,
        prediction_feature_processor=fake_feature_processor,
        prediction_feature_processor_kwargs=None,
    )
    assert segmented_model.json()["prediction_segment_type"] == "one_month"
    assert (
        segmented_model.json()["prediction_feature_processor"]
        == "fake_feature_processor"
    )
    assert json.dumps(segmented_model.json())
| 32.060932 | 85 | 0.651202 |
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.segmentation import (
CalTRACKSegmentModel,
SegmentedModel,
segment_time_series,
iterate_segmented_dataset,
)
@pytest.fixture
def index_8760():
return pd.date_range("2017-01-01", periods=365 * 24, freq="H", tz="UTC")
def test_segment_time_series_invalid_type(index_8760):
with pytest.raises(ValueError):
segment_time_series(index_8760, segment_type="unknown")
def test_segment_time_series_single(index_8760):
weights = segment_time_series(index_8760, segment_type="single")
assert list(weights.columns) == ["all"]
assert weights.shape == (8760, 1)
assert weights.sum().sum() == 8760.0
def test_segment_time_series_one_month(index_8760):
weights = segment_time_series(index_8760, segment_type="one_month")
assert list(weights.columns) == [
"jan",
"feb",
"mar",
"apr",
"may",
"jun",
"jul",
"aug",
"sep",
"oct",
"nov",
"dec",
]
assert weights.shape == (8760, 12)
assert weights.sum().sum() == 8760.0
def test_segment_time_series_three_month(index_8760):
weights = segment_time_series(index_8760, segment_type="three_month")
assert list(weights.columns) == [
"dec-jan-feb",
"jan-feb-mar",
"feb-mar-apr",
"mar-apr-may",
"apr-may-jun",
"may-jun-jul",
"jun-jul-aug",
"jul-aug-sep",
"aug-sep-oct",
"sep-oct-nov",
"oct-nov-dec",
"nov-dec-jan",
]
assert weights.shape == (8760, 12)
assert weights.sum().sum() == 26280.0
def test_segment_time_series_three_month_weighted(index_8760):
weights = segment_time_series(index_8760, segment_type="three_month_weighted")
assert list(weights.columns) == [
"dec-jan-feb-weighted",
"jan-feb-mar-weighted",
"feb-mar-apr-weighted",
"mar-apr-may-weighted",
"apr-may-jun-weighted",
"may-jun-jul-weighted",
"jun-jul-aug-weighted",
"jul-aug-sep-weighted",
"aug-sep-oct-weighted",
"sep-oct-nov-weighted",
"oct-nov-dec-weighted",
"nov-dec-jan-weighted",
]
assert weights.shape == (8760, 12)
assert weights.sum().sum() == 17520.0
def test_segment_time_series_drop_zero_weight_segments(index_8760):
weights = segment_time_series(
index_8760[:100], segment_type="one_month", drop_zero_weight_segments=True
)
assert list(weights.columns) == ["jan"]
assert weights.shape == (100, 1)
assert weights.sum().sum() == 100.0
@pytest.fixture
def dataset():
index = pd.date_range("2017-01-01", periods=1000, freq="H", tz="UTC")
return pd.DataFrame({"a": 1, "b": 2}, index=index, columns=["a", "b"])
def test_iterate_segmented_dataset_no_segmentation(dataset):
iterator = iterate_segmented_dataset(dataset, segmentation=None)
segment_name, data = next(iterator)
assert segment_name is None
assert list(data.columns) == ["a", "b", "weight"]
assert data.shape == (1000, 3)
assert data.sum().sum() == 4000
with pytest.raises(StopIteration):
next(iterator)
@pytest.fixture
def segmentation(dataset):
return segment_time_series(dataset.index, segment_type="one_month")
def test_iterate_segmented_dataset_with_segmentation(dataset, segmentation):
iterator = iterate_segmented_dataset(dataset, segmentation=segmentation)
segment_name, data = next(iterator)
assert segment_name == "jan"
assert list(data.columns) == ["a", "b", "weight"]
assert data.shape == (744, 3)
assert data.sum().sum() == 2976.0
segment_name, data = next(iterator)
assert segment_name == "feb"
assert list(data.columns) == ["a", "b", "weight"]
assert data.shape == (256, 3)
assert data.sum().sum() == 1024.0
segment_name, data = next(iterator)
assert segment_name == "mar"
assert list(data.columns) == ["a", "b", "weight"]
assert data.shape == (0, 3)
assert data.sum().sum() == 0.0
def test_iterate_segmented_dataset_with_processor(dataset, segmentation):
feature_processor_segment_names = []
def feature_processor(
segment_name, dataset, column_mapping=None
):
feature_processor_segment_names.append(segment_name)
return dataset.rename(columns=column_mapping).assign(weight=1)
iterator = iterate_segmented_dataset(
dataset,
segmentation=segmentation,
feature_processor=feature_processor,
feature_processor_kwargs={"column_mapping": {"a": "c", "b": "d"}},
feature_processor_segment_name_mapping={"jan": "jan2", "feb": "feb2"},
)
segment_name, data = next(iterator)
assert feature_processor_segment_names == ["jan2"]
assert segment_name == "jan"
assert list(data.columns) == ["c", "d", "weight"]
assert data.shape == (1000, 3)
assert data.sum().sum() == 4000.0
segment_name, data = next(iterator)
assert feature_processor_segment_names == ["jan2", "feb2"]
assert segment_name == "feb"
assert list(data.columns) == ["c", "d", "weight"]
assert data.shape == (1000, 3)
assert data.sum().sum() == 4000.0
def test_segment_model():
segment_model = CalTRACKSegmentModel(
segment_name="segment",
model=None,
formula="meter_value ~ C(hour_of_week) + a - 1",
model_params={"C(hour_of_week)[1]": 1, "a": 1},
warnings=None,
)
index = pd.date_range("2017-01-01", periods=2, freq="H", tz="UTC")
data = pd.DataFrame({"a": [1, 1], "hour_of_week": [1, 1]}, index=index)
prediction = segment_model.predict(data)
assert prediction.sum() == 4
def test_segmented_model():
segment_model = CalTRACKSegmentModel(
segment_name="jan",
model=None,
formula="meter_value ~ C(hour_of_week) + a- 1",
model_params={"C(hour_of_week)[1]": 1, "a": 1},
warnings=None,
)
def fake_feature_processor(segment_name, segment_data):
return pd.DataFrame(
{"hour_of_week": 1, "a": 1, "weight": segment_data.weight},
index=segment_data.index,
)
segmented_model = SegmentedModel(
segment_models=[segment_model],
prediction_segment_type="one_month",
prediction_segment_name_mapping=None,
prediction_feature_processor=fake_feature_processor,
prediction_feature_processor_kwargs=None,
)
index = pd.date_range("2017-01-01", periods=24 * 50, freq="H", tz="UTC")
temps = pd.Series(np.linspace(0, 100, 24 * 50), index=index)
prediction = segmented_model.predict(temps.index, temps).result.predicted_usage
assert prediction.sum() == 1488.0
def test_segment_model_serialized():
segment_model = CalTRACKSegmentModel(
segment_name="jan",
model=None,
formula="meter_value ~ a + b - 1",
model_params={"a": 1, "b": 1},
warnings=None,
)
assert segment_model.json()["formula"] == "meter_value ~ a + b - 1"
assert segment_model.json()["model_params"] == {"a": 1, "b": 1}
assert segment_model.json()["warnings"] == []
assert json.dumps(segment_model.json())
def test_segmented_model_serialized():
segment_model = CalTRACKSegmentModel(
segment_name="jan",
model=None,
formula="meter_value ~ a + b - 1",
model_params={"a": 1, "b": 1},
warnings=None,
)
def fake_feature_processor(segment_name, segment_data):
return pd.DataFrame(
{"a": 1, "b": 1, "weight": segment_data.weight}, index=segment_data.index
)
segmented_model = SegmentedModel(
segment_models=[segment_model],
prediction_segment_type="one_month",
prediction_segment_name_mapping=None,
prediction_feature_processor=fake_feature_processor,
prediction_feature_processor_kwargs=None,
)
assert segmented_model.json()["prediction_segment_type"] == "one_month"
assert (
segmented_model.json()["prediction_feature_processor"]
== "fake_feature_processor"
)
assert json.dumps(segmented_model.json())
| true | true |
1c32d8431f0489ef556ac058fc71283341a5757d | 64 | py | Python | setup.py | ken2403/schnettriple | 28df9c89648f07524b563c0b9b58d93d9264869c | [
"MIT"
] | null | null | null | setup.py | ken2403/schnettriple | 28df9c89648f07524b563c0b9b58d93d9264869c | [
"MIT"
] | null | null | null | setup.py | ken2403/schnettriple | 28df9c89648f07524b563c0b9b58d93d9264869c | [
"MIT"
] | null | null | null | import setuptools
# argumet is in setup.cfg
setuptools.setup()
| 12.8 | 25 | 0.78125 | import setuptools
setuptools.setup()
| true | true |
1c32d8bc34e95480e3ea1a50f32460e724aa0965 | 36,950 | py | Python | src/models/model.py | CrafterKolyan/IRNet-1 | 72df5c876f368ae4a1b594e7a740ff966dbbd3ba | [
"MIT"
] | 223 | 2019-11-11T09:06:51.000Z | 2022-03-17T06:55:56.000Z | src/models/model.py | KevinQian97/Semantic-Error-Correction | 0cbad02e4a6c8638af12c14856df84d143f9299d | [
"Apache-2.0"
] | 50 | 2019-11-17T08:08:09.000Z | 2022-02-13T08:54:02.000Z | src/models/model.py | KevinQian97/Semantic-Error-Correction | 0cbad02e4a6c8638af12c14856df84d143f9299d | [
"Apache-2.0"
] | 86 | 2019-11-11T09:07:50.000Z | 2022-02-14T07:41:33.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/25
# @Author : Jiaqi&Zecheng
# @File : model.py
# @Software: PyCharm
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
from torch.autograd import Variable
from src.beam import Beams, ActionInfo
from src.dataset import Batch
from src.models import nn_utils
from src.models.basic_model import BasicModel
from src.models.pointer_net import PointerNet
from src.rule import semQL as define_rule
class IRNet(BasicModel):
    def __init__(self, args, grammar):
        """Build the IRNet encoder/decoder stacks.

        Components: a bidirectional LSTM utterance encoder, two LSTMCell
        decoders (one for the coarse SemQL sketch, one for the full
        logical form), pointer networks for column/table selection, and
        embedding tables for grammar productions and action types.

        :param args: hyper-parameter namespace (embedding/hidden sizes,
            dropout, cuda flag, readout type, column_att, ...)
        :param grammar: SemQL grammar providing prod2id / type2id maps
        """
        super(IRNet, self).__init__()
        self.args = args
        self.grammar = grammar
        self.use_column_pointer = args.column_pointer
        self.use_sentence_features = args.sentence_features
        # Tensor constructors matching the target device.
        if args.cuda:
            self.new_long_tensor = torch.cuda.LongTensor
            self.new_tensor = torch.cuda.FloatTensor
        else:
            self.new_long_tensor = torch.LongTensor
            self.new_tensor = torch.FloatTensor
        # Bidirectional encoder: hidden size split across the two directions.
        self.encoder_lstm = nn.LSTM(args.embed_size, args.hidden_size // 2, bidirectional=True,
                                    batch_first=True)
        input_dim = args.action_embed_size + \
                    args.att_vec_size + \
                    args.type_embed_size
        # previous action
        # input feeding
        # pre type embedding
        self.lf_decoder_lstm = nn.LSTMCell(input_dim, args.hidden_size)
        self.sketch_decoder_lstm = nn.LSTMCell(input_dim, args.hidden_size)
        # initialize the decoder's state and cells with encoder hidden states
        self.decoder_cell_init = nn.Linear(args.hidden_size, args.hidden_size)
        # Attention projections for the two decoding passes.
        self.att_sketch_linear = nn.Linear(args.hidden_size, args.hidden_size, bias=False)
        self.att_lf_linear = nn.Linear(args.hidden_size, args.hidden_size, bias=False)
        self.sketch_att_vec_linear = nn.Linear(args.hidden_size + args.hidden_size, args.att_vec_size, bias=False)
        self.lf_att_vec_linear = nn.Linear(args.hidden_size + args.hidden_size, args.att_vec_size, bias=False)
        self.prob_att = nn.Linear(args.att_vec_size, 1)
        self.prob_len = nn.Linear(1, 1)
        # Projects the 4-dim one-hot column-type vector into embedding space.
        self.col_type = nn.Linear(4, args.col_embed_size)
        self.sketch_encoder = nn.LSTM(args.action_embed_size, args.action_embed_size // 2, bidirectional=True,
                                      batch_first=True)
        # Embedding tables for grammar productions and action types.
        self.production_embed = nn.Embedding(len(grammar.prod2id), args.action_embed_size)
        self.type_embed = nn.Embedding(len(grammar.type2id), args.type_embed_size)
        self.production_readout_b = nn.Parameter(torch.FloatTensor(len(grammar.prod2id)).zero_())
        self.att_project = nn.Linear(args.hidden_size + args.type_embed_size, args.hidden_size)
        self.N_embed = nn.Embedding(len(define_rule.N._init_grammar()), args.action_embed_size)
        # Readout: attention vector -> scores over grammar productions,
        # tying the output weights to the production embedding table.
        self.read_out_act = F.tanh if args.readout == 'non_linear' else nn_utils.identity
        self.query_vec_to_action_embed = nn.Linear(args.att_vec_size, args.action_embed_size,
                                                   bias=args.readout == 'non_linear')
        self.production_readout = lambda q: F.linear(self.read_out_act(self.query_vec_to_action_embed(q)),
                                                     self.production_embed.weight, self.production_readout_b)
        self.q_att = nn.Linear(args.hidden_size, args.embed_size)
        # Feed selected column/table embeddings back into the decoder input.
        self.column_rnn_input = nn.Linear(args.col_embed_size, args.action_embed_size, bias=False)
        self.table_rnn_input = nn.Linear(args.col_embed_size, args.action_embed_size, bias=False)
        self.dropout = nn.Dropout(args.dropout)
        self.column_pointer_net = PointerNet(args.hidden_size, args.col_embed_size, attention_type=args.column_att)
        self.table_pointer_net = PointerNet(args.hidden_size, args.col_embed_size, attention_type=args.column_att)
        # initial the embedding layers
        nn.init.xavier_normal_(self.production_embed.weight.data)
        nn.init.xavier_normal_(self.type_embed.weight.data)
        nn.init.xavier_normal_(self.N_embed.weight.data)
        print('Use Column Pointer: ', True if self.use_column_pointer else False)
    def forward(self, examples):
        """Compute training log-probabilities for a batch of examples.

        Two teacher-forced decoding passes over the encoded utterance:
        1. sketch pass -- scores the coarse SemQL sketch actions
           (Root1/Root/Sel/Filter/Sup/N/Order productions);
        2. full pass -- scores the detailed action sequence, using
           pointer networks for column (C) and table (T) choices and the
           production readout for the remaining actions.

        :param examples: list of examples carrying .sketch and
            .tgt_actions gold action sequences
        :return: [sketch_prob_var, lf_prob_var] -- per-example sums of
            log action probabilities for sketch and full logical form
        """
        args = self.args
        # now should implement the examples
        batch = Batch(examples, self.grammar, cuda=self.args.cuda)
        table_appear_mask = batch.table_appear_mask
        src_encodings, (last_state, last_cell) = self.encode(batch.src_sents, batch.src_sents_len, None)
        src_encodings = self.dropout(src_encodings)
        # Pre-project encoder states for the attention in each pass.
        utterance_encodings_sketch_linear = self.att_sketch_linear(src_encodings)
        utterance_encodings_lf_linear = self.att_lf_linear(src_encodings)
        dec_init_vec = self.init_decoder_state(last_cell)
        h_tm1 = dec_init_vec
        action_probs = [[] for _ in examples]
        # Padding embeddings for positions past a sequence's end.
        zero_action_embed = Variable(self.new_tensor(args.action_embed_size).zero_())
        zero_type_embed = Variable(self.new_tensor(args.type_embed_size).zero_())
        sketch_attention_history = list()
        # ---- pass 1: sketch decoding (teacher forcing) ----
        for t in range(batch.max_sketch_num):
            if t == 0:
                # First step: all-zero decoder input (no previous action).
                x = Variable(self.new_tensor(len(batch), self.sketch_decoder_lstm.input_size).zero_(),
                             requires_grad=False)
            else:
                a_tm1_embeds = []
                pre_types = []
                for e_id, example in enumerate(examples):
                    if t < len(example.sketch):
                        # get the last action
                        # This is the action embedding
                        action_tm1 = example.sketch[t - 1]
                        if type(action_tm1) in [define_rule.Root1,
                                                define_rule.Root,
                                                define_rule.Sel,
                                                define_rule.Filter,
                                                define_rule.Sup,
                                                define_rule.N,
                                                define_rule.Order]:
                            a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                        else:
                            # Non-sketch action in a sketch sequence: hard failure.
                            print(action_tm1, 'only for sketch')
                            quit()
                            a_tm1_embed = zero_action_embed
                            pass
                    else:
                        a_tm1_embed = zero_action_embed
                    a_tm1_embeds.append(a_tm1_embed)
                a_tm1_embeds = torch.stack(a_tm1_embeds)
                inputs = [a_tm1_embeds]
                # Embedding of the previous action's *type*.
                for e_id, example in enumerate(examples):
                    if t < len(example.sketch):
                        action_tm = example.sketch[t - 1]
                        pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
                    else:
                        pre_type = zero_type_embed
                    pre_types.append(pre_type)
                pre_types = torch.stack(pre_types)
                # Input feeding: previous attentional vector (att_tm1 is set
                # at the end of the t == 0 iteration, so it exists here).
                inputs.append(att_tm1)
                inputs.append(pre_types)
                x = torch.cat(inputs, dim=-1)
            src_mask = batch.src_token_mask
            (h_t, cell_t), att_t, aw = self.step(x, h_tm1, src_encodings,
                                                 utterance_encodings_sketch_linear, self.sketch_decoder_lstm,
                                                 self.sketch_att_vec_linear,
                                                 src_token_mask=src_mask, return_att_weight=True)
            sketch_attention_history.append(att_t)
            # get the Root possibility
            apply_rule_prob = F.softmax(self.production_readout(att_t), dim=-1)
            # Collect the probability assigned to each gold sketch action.
            for e_id, example in enumerate(examples):
                if t < len(example.sketch):
                    action_t = example.sketch[t]
                    act_prob_t_i = apply_rule_prob[e_id, self.grammar.prod2id[action_t.production]]
                    action_probs[e_id].append(act_prob_t_i)
            h_tm1 = (h_t, cell_t)
            att_tm1 = att_t
        # Per-example sum of log action probabilities for the sketch.
        sketch_prob_var = torch.stack(
            [torch.stack(action_probs_i, dim=0).log().sum() for action_probs_i in action_probs], dim=0)
        # ---- schema-aware column/table representations ----
        table_embedding = self.gen_x_batch(batch.table_sents)
        src_embedding = self.gen_x_batch(batch.src_sents)
        schema_embedding = self.gen_x_batch(batch.table_names)
        # get emb differ
        embedding_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=table_embedding,
                                                 table_unk_mask=batch.table_unk_mask)
        schema_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=schema_embedding,
                                              table_unk_mask=batch.schema_token_mask)
        # Similarity-weighted utterance context added to column/table embeddings.
        tab_ctx = (src_encodings.unsqueeze(1) * embedding_differ.unsqueeze(3)).sum(2)
        schema_ctx = (src_encodings.unsqueeze(1) * schema_differ.unsqueeze(3)).sum(2)
        table_embedding = table_embedding + tab_ctx
        schema_embedding = schema_embedding + schema_ctx
        col_type = self.input_type(batch.col_hot_type)
        col_type_var = self.col_type(col_type)
        table_embedding = table_embedding + col_type_var
        batch_table_dict = batch.col_table_dict
        # Index of the most recently selected column per example (0 initially).
        table_enable = np.zeros(shape=(len(examples)))
        action_probs = [[] for _ in examples]
        h_tm1 = dec_init_vec
        # ---- pass 2: full logical-form decoding (teacher forcing) ----
        for t in range(batch.max_action_num):
            if t == 0:
                # x = self.lf_begin_vec.unsqueeze(0).repeat(len(batch), 1)
                x = Variable(self.new_tensor(len(batch), self.lf_decoder_lstm.input_size).zero_(), requires_grad=False)
            else:
                a_tm1_embeds = []
                pre_types = []
                for e_id, example in enumerate(examples):
                    if t < len(example.tgt_actions):
                        action_tm1 = example.tgt_actions[t - 1]
                        if type(action_tm1) in [define_rule.Root1,
                                                define_rule.Root,
                                                define_rule.Sel,
                                                define_rule.Filter,
                                                define_rule.Sup,
                                                define_rule.N,
                                                define_rule.Order,
                                                ]:
                            a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                        else:
                            # C/T actions feed back the selected column/table
                            # embedding; A actions use the production table.
                            if isinstance(action_tm1, define_rule.C):
                                a_tm1_embed = self.column_rnn_input(table_embedding[e_id, action_tm1.id_c])
                            elif isinstance(action_tm1, define_rule.T):
                                a_tm1_embed = self.column_rnn_input(schema_embedding[e_id, action_tm1.id_c])
                            elif isinstance(action_tm1, define_rule.A):
                                a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                            else:
                                print(action_tm1, 'not implement')
                                quit()
                                a_tm1_embed = zero_action_embed
                                pass
                    else:
                        a_tm1_embed = zero_action_embed
                    a_tm1_embeds.append(a_tm1_embed)
                a_tm1_embeds = torch.stack(a_tm1_embeds)
                inputs = [a_tm1_embeds]
                # tgt t-1 action type
                for e_id, example in enumerate(examples):
                    if t < len(example.tgt_actions):
                        action_tm = example.tgt_actions[t - 1]
                        pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
                    else:
                        pre_type = zero_type_embed
                    pre_types.append(pre_type)
                pre_types = torch.stack(pre_types)
                inputs.append(att_tm1)
                inputs.append(pre_types)
                x = torch.cat(inputs, dim=-1)
            src_mask = batch.src_token_mask
            (h_t, cell_t), att_t, aw = self.step(x, h_tm1, src_encodings,
                                                 utterance_encodings_lf_linear, self.lf_decoder_lstm,
                                                 self.lf_att_vec_linear,
                                                 src_token_mask=src_mask, return_att_weight=True)
            apply_rule_prob = F.softmax(self.production_readout(att_t), dim=-1)
            table_appear_mask_val = torch.from_numpy(table_appear_mask)
            # NOTE(review): `self.cuda` here is nn.Module's bound method,
            # which is always truthy -- this likely intends `self.args.cuda`.
            # As written the mask is moved to GPU unconditionally; confirm
            # behavior on CPU-only runs.
            if self.cuda:
                table_appear_mask_val = table_appear_mask_val.cuda()
            if self.use_column_pointer:
                # Memory-augmented pointer: gate mixes scores over columns
                # already selected (table_appear_mask) vs. unselected ones.
                gate = F.sigmoid(self.prob_att(att_t))
                weights = self.column_pointer_net(src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
                                                  src_token_mask=None) * table_appear_mask_val * gate + self.column_pointer_net(
                    src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
                    src_token_mask=None) * (1 - table_appear_mask_val) * (1 - gate)
            else:
                weights = self.column_pointer_net(src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
                                                  src_token_mask=batch.table_token_mask)
            # Mask padded columns before the softmax.
            weights.data.masked_fill_(batch.table_token_mask.bool(), -float('inf'))
            column_attention_weights = F.softmax(weights, dim=-1)
            table_weights = self.table_pointer_net(src_encodings=schema_embedding, query_vec=att_t.unsqueeze(0),
                                                   src_token_mask=None)
            schema_token_mask = batch.schema_token_mask.expand_as(table_weights)
            table_weights.data.masked_fill_(schema_token_mask.bool(), -float('inf'))
            # Restrict table choices to tables compatible with the last
            # selected column (table_enable tracks that column per example).
            table_dict = [batch_table_dict[x_id][int(x)] for x_id, x in enumerate(table_enable.tolist())]
            table_mask = batch.table_dict_mask(table_dict)
            table_weights.data.masked_fill_(table_mask.bool(), -float('inf'))
            table_weights = F.softmax(table_weights, dim=-1)
            # now get the loss
            for e_id, example in enumerate(examples):
                if t < len(example.tgt_actions):
                    action_t = example.tgt_actions[t]
                    if isinstance(action_t, define_rule.C):
                        # Record the selected column for the memory mask and
                        # for constraining subsequent table choices.
                        table_appear_mask[e_id, action_t.id_c] = 1
                        table_enable[e_id] = action_t.id_c
                        act_prob_t_i = column_attention_weights[e_id, action_t.id_c]
                        action_probs[e_id].append(act_prob_t_i)
                    elif isinstance(action_t, define_rule.T):
                        act_prob_t_i = table_weights[e_id, action_t.id_c]
                        action_probs[e_id].append(act_prob_t_i)
                    elif isinstance(action_t, define_rule.A):
                        act_prob_t_i = apply_rule_prob[e_id, self.grammar.prod2id[action_t.production]]
                        action_probs[e_id].append(act_prob_t_i)
                    else:
                        # Sketch actions were already scored in pass 1.
                        pass
            h_tm1 = (h_t, cell_t)
            att_tm1 = att_t
        lf_prob_var = torch.stack(
            [torch.stack(action_probs_i, dim=0).log().sum() for action_probs_i in action_probs], dim=0)
        return [sketch_prob_var, lf_prob_var]
    def parse(self, examples, beam_size=5):
        """
        Two-stage beam-search decoding for one example at a time.

        Stage 1 decodes the sketch (the coarse SemQL skeleton: Root1/Root/Sel/
        Filter/Sup/N/Order productions) with the sketch decoder. Stage 2 takes
        the best completed sketch, pads it with placeholder C/T/A slots via
        ``self.padding_sketch``, and fills those slots with the lf decoder
        using the column/table pointer networks.

        :param examples: a single example (wrapped into a one-element Batch below)
        :param beam_size: number of completed hypotheses to collect per stage
        :return: [completed_beams, sketch_actions]; ([], []) if no sketch completes
        """
        batch = Batch([examples], self.grammar, cuda=self.args.cuda)
        src_encodings, (last_state, last_cell) = self.encode(batch.src_sents, batch.src_sents_len, None)
        src_encodings = self.dropout(src_encodings)
        # Precompute the attention projections once; reused at every decode step.
        utterance_encodings_sketch_linear = self.att_sketch_linear(src_encodings)
        utterance_encodings_lf_linear = self.att_lf_linear(src_encodings)
        dec_init_vec = self.init_decoder_state(last_cell)
        h_tm1 = dec_init_vec
        t = 0
        # ---- Stage 1: beam search over sketch actions ----
        beams = [Beams(is_sketch=True)]
        completed_beams = []
        while len(completed_beams) < beam_size and t < self.args.decode_max_time_step:
            hyp_num = len(beams)
            # Expand (broadcast) encoder outputs to the current number of hypotheses.
            exp_src_enconding = src_encodings.expand(hyp_num, src_encodings.size(1),
                                                     src_encodings.size(2))
            exp_src_encodings_sketch_linear = utterance_encodings_sketch_linear.expand(hyp_num,
                                                                                       utterance_encodings_sketch_linear.size(
                                                                                           1),
                                                                                       utterance_encodings_sketch_linear.size(
                                                                                           2))
            if t == 0:
                # First step: zero input vector (no previous action yet).
                with torch.no_grad():
                    x = Variable(self.new_tensor(1, self.sketch_decoder_lstm.input_size).zero_())
            else:
                # Build decoder input from each hypothesis' last action embedding,
                # the previous attentional vector, and the last action-type embedding.
                a_tm1_embeds = []
                pre_types = []
                for e_id, hyp in enumerate(beams):
                    action_tm1 = hyp.actions[-1]
                    if type(action_tm1) in [define_rule.Root1,
                                            define_rule.Root,
                                            define_rule.Sel,
                                            define_rule.Filter,
                                            define_rule.Sup,
                                            define_rule.N,
                                            define_rule.Order]:
                        a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                    else:
                        raise ValueError('unknown action %s' % action_tm1)
                    a_tm1_embeds.append(a_tm1_embed)
                a_tm1_embeds = torch.stack(a_tm1_embeds)
                inputs = [a_tm1_embeds]
                for e_id, hyp in enumerate(beams):
                    action_tm = hyp.actions[-1]
                    pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
                    pre_types.append(pre_type)
                pre_types = torch.stack(pre_types)
                inputs.append(att_tm1)
                inputs.append(pre_types)
                x = torch.cat(inputs, dim=-1)
            (h_t, cell_t), att_t = self.step(x, h_tm1, exp_src_enconding,
                                             exp_src_encodings_sketch_linear, self.sketch_decoder_lstm,
                                             self.sketch_att_vec_linear,
                                             src_token_mask=None)
            apply_rule_log_prob = F.log_softmax(self.production_readout(att_t), dim=-1)
            # Enumerate every legal continuation of every live hypothesis.
            new_hyp_meta = []
            for hyp_id, hyp in enumerate(beams):
                action_class = hyp.get_availableClass()
                if action_class in [define_rule.Root1,
                                    define_rule.Root,
                                    define_rule.Sel,
                                    define_rule.Filter,
                                    define_rule.Sup,
                                    define_rule.N,
                                    define_rule.Order]:
                    possible_productions = self.grammar.get_production(action_class)
                    for possible_production in possible_productions:
                        prod_id = self.grammar.prod2id[possible_production]
                        prod_score = apply_rule_log_prob[hyp_id, prod_id]
                        new_hyp_score = hyp.score + prod_score.data.cpu()
                        meta_entry = {'action_type': action_class, 'prod_id': prod_id,
                                      'score': prod_score, 'new_hyp_score': new_hyp_score,
                                      'prev_hyp_id': hyp_id}
                        new_hyp_meta.append(meta_entry)
                else:
                    raise RuntimeError('No right action class')
            if not new_hyp_meta: break
            # Keep the top candidates, leaving room for already-completed beams.
            new_hyp_scores = torch.stack([x['new_hyp_score'] for x in new_hyp_meta], dim=0)
            top_new_hyp_scores, meta_ids = torch.topk(new_hyp_scores,
                                                      k=min(new_hyp_scores.size(0),
                                                            beam_size - len(completed_beams)))
            live_hyp_ids = []
            new_beams = []
            for new_hyp_score, meta_id in zip(top_new_hyp_scores.data.cpu(), meta_ids.data.cpu()):
                action_info = ActionInfo()
                hyp_meta_entry = new_hyp_meta[meta_id]
                prev_hyp_id = hyp_meta_entry['prev_hyp_id']
                prev_hyp = beams[prev_hyp_id]
                action_type_str = hyp_meta_entry['action_type']
                prod_id = hyp_meta_entry['prod_id']
                if prod_id < len(self.grammar.id2prod):
                    production = self.grammar.id2prod[prod_id]
                    action = action_type_str(list(action_type_str._init_grammar()).index(production))
                else:
                    raise NotImplementedError
                action_info.action = action
                action_info.t = t
                action_info.score = hyp_meta_entry['score']
                new_hyp = prev_hyp.clone_and_apply_action_info(action_info)
                new_hyp.score = new_hyp_score
                new_hyp.inputs.extend(prev_hyp.inputs)
                if new_hyp.is_valid is False:
                    continue
                if new_hyp.completed:
                    completed_beams.append(new_hyp)
                else:
                    new_beams.append(new_hyp)
                    live_hyp_ids.append(prev_hyp_id)
            if live_hyp_ids:
                # Carry forward the decoder state only for surviving hypotheses.
                h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])
                att_tm1 = att_t[live_hyp_ids]
                beams = new_beams
                t += 1
            else:
                break
        # now get the sketch result
        completed_beams.sort(key=lambda hyp: -hyp.score)
        if len(completed_beams) == 0:
            return [[], []]
        sketch_actions = completed_beams[0].actions
        # sketch_actions = examples.sketch
        # Pad the best sketch with placeholder C/T/A slots for stage 2 to fill.
        padding_sketch = self.padding_sketch(sketch_actions)
        table_embedding = self.gen_x_batch(batch.table_sents)
        src_embedding = self.gen_x_batch(batch.src_sents)
        schema_embedding = self.gen_x_batch(batch.table_names)
        # get emb differ
        embedding_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=table_embedding,
                                                 table_unk_mask=batch.table_unk_mask)
        schema_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=schema_embedding,
                                              table_unk_mask=batch.schema_token_mask)
        tab_ctx = (src_encodings.unsqueeze(1) * embedding_differ.unsqueeze(3)).sum(2)
        schema_ctx = (src_encodings.unsqueeze(1) * schema_differ.unsqueeze(3)).sum(2)
        # Enrich column/table embeddings with question context and column types.
        table_embedding = table_embedding + tab_ctx
        schema_embedding = schema_embedding + schema_ctx
        col_type = self.input_type(batch.col_hot_type)
        col_type_var = self.col_type(col_type)
        table_embedding = table_embedding + col_type_var
        batch_table_dict = batch.col_table_dict
        # ---- Stage 2: beam search over the full action sequence ----
        h_tm1 = dec_init_vec
        t = 0
        beams = [Beams(is_sketch=False)]
        completed_beams = []
        while len(completed_beams) < beam_size and t < self.args.decode_max_time_step:
            hyp_num = len(beams)
            # expand value
            exp_src_encodings = src_encodings.expand(hyp_num, src_encodings.size(1),
                                                     src_encodings.size(2))
            exp_utterance_encodings_lf_linear = utterance_encodings_lf_linear.expand(hyp_num,
                                                                                     utterance_encodings_lf_linear.size(
                                                                                         1),
                                                                                     utterance_encodings_lf_linear.size(
                                                                                         2))
            exp_table_embedding = table_embedding.expand(hyp_num, table_embedding.size(1),
                                                         table_embedding.size(2))
            exp_schema_embedding = schema_embedding.expand(hyp_num, schema_embedding.size(1),
                                                           schema_embedding.size(2))
            # Rebuild per-hypothesis masks: which columns each hypothesis has
            # already selected, and its most recently selected column.
            table_appear_mask = batch.table_appear_mask
            table_appear_mask = np.zeros((hyp_num, table_appear_mask.shape[1]), dtype=np.float32)
            table_enable = np.zeros(shape=(hyp_num))
            for e_id, hyp in enumerate(beams):
                for act in hyp.actions:
                    if type(act) == define_rule.C:
                        table_appear_mask[e_id][act.id_c] = 1
                        table_enable[e_id] = act.id_c
            if t == 0:
                with torch.no_grad():
                    x = Variable(self.new_tensor(1, self.lf_decoder_lstm.input_size).zero_())
            else:
                a_tm1_embeds = []
                pre_types = []
                for e_id, hyp in enumerate(beams):
                    action_tm1 = hyp.actions[-1]
                    if type(action_tm1) in [define_rule.Root1,
                                            define_rule.Root,
                                            define_rule.Sel,
                                            define_rule.Filter,
                                            define_rule.Sup,
                                            define_rule.N,
                                            define_rule.Order]:
                        a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                        hyp.sketch_step += 1
                    elif isinstance(action_tm1, define_rule.C):
                        a_tm1_embed = self.column_rnn_input(table_embedding[0, action_tm1.id_c])
                    elif isinstance(action_tm1, define_rule.T):
                        a_tm1_embed = self.column_rnn_input(schema_embedding[0, action_tm1.id_c])
                    elif isinstance(action_tm1, define_rule.A):
                        a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
                    else:
                        raise ValueError('unknown action %s' % action_tm1)
                    a_tm1_embeds.append(a_tm1_embed)
                a_tm1_embeds = torch.stack(a_tm1_embeds)
                inputs = [a_tm1_embeds]
                for e_id, hyp in enumerate(beams):
                    action_tm = hyp.actions[-1]
                    pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
                    pre_types.append(pre_type)
                pre_types = torch.stack(pre_types)
                inputs.append(att_tm1)
                inputs.append(pre_types)
                x = torch.cat(inputs, dim=-1)
            (h_t, cell_t), att_t = self.step(x, h_tm1, exp_src_encodings,
                                             exp_utterance_encodings_lf_linear, self.lf_decoder_lstm,
                                             self.lf_att_vec_linear,
                                             src_token_mask=None)
            apply_rule_log_prob = F.log_softmax(self.production_readout(att_t), dim=-1)
            table_appear_mask_val = torch.from_numpy(table_appear_mask)
            if self.args.cuda: table_appear_mask_val = table_appear_mask_val.cuda()
            if self.use_column_pointer:
                # Gated mix of two pointer passes: one biased toward columns the
                # hypothesis already used, one toward unused columns.
                gate = F.sigmoid(self.prob_att(att_t))
                weights = self.column_pointer_net(src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
                                                  src_token_mask=None) * table_appear_mask_val * gate + self.column_pointer_net(
                    src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
                    src_token_mask=None) * (1 - table_appear_mask_val) * (1 - gate)
                # weights = weights + self.col_attention_out(exp_embedding_differ).squeeze()
            else:
                weights = self.column_pointer_net(src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
                                                  src_token_mask=batch.table_token_mask)
            # weights.data.masked_fill_(exp_col_pred_mask, -float('inf'))
            column_selection_log_prob = F.log_softmax(weights, dim=-1)
            table_weights = self.table_pointer_net(src_encodings=exp_schema_embedding, query_vec=att_t.unsqueeze(0),
                                                   src_token_mask=None)
            # table_weights = self.table_pointer_net(src_encodings=exp_schema_embedding, query_vec=att_t.unsqueeze(0), src_token_mask=None)
            schema_token_mask = batch.schema_token_mask.expand_as(table_weights)
            table_weights.data.masked_fill_(schema_token_mask.bool(), -float('inf'))
            # Restrict table choices to tables linked to the last selected column.
            table_dict = [batch_table_dict[0][int(x)] for x_id, x in enumerate(table_enable.tolist())]
            table_mask = batch.table_dict_mask(table_dict)
            table_weights.data.masked_fill_(table_mask.bool(), -float('inf'))
            table_weights = F.log_softmax(table_weights, dim=-1)
            new_hyp_meta = []
            for hyp_id, hyp in enumerate(beams):
                # TODO: should change this
                # The padded sketch dictates which slot type is decoded at step t.
                if type(padding_sketch[t]) == define_rule.A:
                    possible_productions = self.grammar.get_production(define_rule.A)
                    for possible_production in possible_productions:
                        prod_id = self.grammar.prod2id[possible_production]
                        prod_score = apply_rule_log_prob[hyp_id, prod_id]
                        new_hyp_score = hyp.score + prod_score.data.cpu()
                        meta_entry = {'action_type': define_rule.A, 'prod_id': prod_id,
                                      'score': prod_score, 'new_hyp_score': new_hyp_score,
                                      'prev_hyp_id': hyp_id}
                        new_hyp_meta.append(meta_entry)
                elif type(padding_sketch[t]) == define_rule.C:
                    for col_id, _ in enumerate(batch.table_sents[0]):
                        col_sel_score = column_selection_log_prob[hyp_id, col_id]
                        new_hyp_score = hyp.score + col_sel_score.data.cpu()
                        meta_entry = {'action_type': define_rule.C, 'col_id': col_id,
                                      'score': col_sel_score, 'new_hyp_score': new_hyp_score,
                                      'prev_hyp_id': hyp_id}
                        new_hyp_meta.append(meta_entry)
                elif type(padding_sketch[t]) == define_rule.T:
                    for t_id, _ in enumerate(batch.table_names[0]):
                        t_sel_score = table_weights[hyp_id, t_id]
                        new_hyp_score = hyp.score + t_sel_score.data.cpu()
                        meta_entry = {'action_type': define_rule.T, 't_id': t_id,
                                      'score': t_sel_score, 'new_hyp_score': new_hyp_score,
                                      'prev_hyp_id': hyp_id}
                        new_hyp_meta.append(meta_entry)
                else:
                    # Sketch actions are fixed by stage 1: copy with zero cost.
                    prod_id = self.grammar.prod2id[padding_sketch[t].production]
                    new_hyp_score = hyp.score + torch.tensor(0.0)
                    meta_entry = {'action_type': type(padding_sketch[t]), 'prod_id': prod_id,
                                  'score': torch.tensor(0.0), 'new_hyp_score': new_hyp_score,
                                  'prev_hyp_id': hyp_id}
                    new_hyp_meta.append(meta_entry)
            if not new_hyp_meta: break
            new_hyp_scores = torch.stack([x['new_hyp_score'] for x in new_hyp_meta], dim=0)
            top_new_hyp_scores, meta_ids = torch.topk(new_hyp_scores,
                                                      k=min(new_hyp_scores.size(0),
                                                            beam_size - len(completed_beams)))
            live_hyp_ids = []
            new_beams = []
            for new_hyp_score, meta_id in zip(top_new_hyp_scores.data.cpu(), meta_ids.data.cpu()):
                action_info = ActionInfo()
                hyp_meta_entry = new_hyp_meta[meta_id]
                prev_hyp_id = hyp_meta_entry['prev_hyp_id']
                prev_hyp = beams[prev_hyp_id]
                action_type_str = hyp_meta_entry['action_type']
                if 'prod_id' in hyp_meta_entry:
                    prod_id = hyp_meta_entry['prod_id']
                if action_type_str == define_rule.C:
                    col_id = hyp_meta_entry['col_id']
                    action = define_rule.C(col_id)
                elif action_type_str == define_rule.T:
                    t_id = hyp_meta_entry['t_id']
                    action = define_rule.T(t_id)
                elif prod_id < len(self.grammar.id2prod):
                    production = self.grammar.id2prod[prod_id]
                    action = action_type_str(list(action_type_str._init_grammar()).index(production))
                else:
                    raise NotImplementedError
                action_info.action = action
                action_info.t = t
                action_info.score = hyp_meta_entry['score']
                new_hyp = prev_hyp.clone_and_apply_action_info(action_info)
                new_hyp.score = new_hyp_score
                new_hyp.inputs.extend(prev_hyp.inputs)
                if new_hyp.is_valid is False:
                    continue
                if new_hyp.completed:
                    completed_beams.append(new_hyp)
                else:
                    new_beams.append(new_hyp)
                    live_hyp_ids.append(prev_hyp_id)
            if live_hyp_ids:
                h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])
                att_tm1 = att_t[live_hyp_ids]
                beams = new_beams
                t += 1
            else:
                break
        completed_beams.sort(key=lambda hyp: -hyp.score)
        return [completed_beams, sketch_actions]
def step(self, x, h_tm1, src_encodings, src_encodings_att_linear, decoder, attention_func, src_token_mask=None,
return_att_weight=False):
# h_t: (batch_size, hidden_size)
h_t, cell_t = decoder(x, h_tm1)
ctx_t, alpha_t = nn_utils.dot_prod_attention(h_t,
src_encodings, src_encodings_att_linear,
mask=src_token_mask)
att_t = F.tanh(attention_func(torch.cat([h_t, ctx_t], 1)))
att_t = self.dropout(att_t)
if return_att_weight:
return (h_t, cell_t), att_t, alpha_t
else:
return (h_t, cell_t), att_t
def init_decoder_state(self, enc_last_cell):
h_0 = self.decoder_cell_init(enc_last_cell)
h_0 = F.tanh(h_0)
return h_0, Variable(self.new_tensor(h_0.size()).zero_())
| 48.300654 | 140 | 0.532476 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
from torch.autograd import Variable
from src.beam import Beams, ActionInfo
from src.dataset import Batch
from src.models import nn_utils
from src.models.basic_model import BasicModel
from src.models.pointer_net import PointerNet
from src.rule import semQL as define_rule
class IRNet(BasicModel):
    def __init__(self, args, grammar):
        """
        Build the IRNet model: a BiLSTM utterance encoder plus two LSTM-cell
        decoders (sketch and full logical form) with pointer networks for
        column/table selection.

        :param args: hyperparameter namespace (embed/hidden sizes, dropout,
            cuda flag, column_pointer and readout switches, etc.)
        :param grammar: SemQL grammar providing prod2id/type2id vocabularies
        """
        super(IRNet, self).__init__()
        self.args = args
        self.grammar = grammar
        self.use_column_pointer = args.column_pointer
        self.use_sentence_features = args.sentence_features
        # Tensor constructors bound to the configured device.
        if args.cuda:
            self.new_long_tensor = torch.cuda.LongTensor
            self.new_tensor = torch.cuda.FloatTensor
        else:
            self.new_long_tensor = torch.LongTensor
            self.new_tensor = torch.FloatTensor
        # Bidirectional encoder: each direction gets half the hidden size.
        self.encoder_lstm = nn.LSTM(args.embed_size, args.hidden_size // 2, bidirectional=True,
                                    batch_first=True)
        # Decoder input = previous-action embedding + attentional vector
        # + previous action-type embedding.
        input_dim = args.action_embed_size + \
                    args.att_vec_size + \
                    args.type_embed_size
        self.lf_decoder_lstm = nn.LSTMCell(input_dim, args.hidden_size)
        self.sketch_decoder_lstm = nn.LSTMCell(input_dim, args.hidden_size)
        self.decoder_cell_init = nn.Linear(args.hidden_size, args.hidden_size)
        # Attention projections for the two decoders.
        self.att_sketch_linear = nn.Linear(args.hidden_size, args.hidden_size, bias=False)
        self.att_lf_linear = nn.Linear(args.hidden_size, args.hidden_size, bias=False)
        self.sketch_att_vec_linear = nn.Linear(args.hidden_size + args.hidden_size, args.att_vec_size, bias=False)
        self.lf_att_vec_linear = nn.Linear(args.hidden_size + args.hidden_size, args.att_vec_size, bias=False)
        self.prob_att = nn.Linear(args.att_vec_size, 1)
        self.prob_len = nn.Linear(1, 1)
        # Projects the 4-dim one-hot column-type feature into the column space.
        self.col_type = nn.Linear(4, args.col_embed_size)
        self.sketch_encoder = nn.LSTM(args.action_embed_size, args.action_embed_size // 2, bidirectional=True,
                                      batch_first=True)
        # Embeddings for grammar productions and action types.
        self.production_embed = nn.Embedding(len(grammar.prod2id), args.action_embed_size)
        self.type_embed = nn.Embedding(len(grammar.type2id), args.type_embed_size)
        self.production_readout_b = nn.Parameter(torch.FloatTensor(len(grammar.prod2id)).zero_())
        self.att_project = nn.Linear(args.hidden_size + args.type_embed_size, args.hidden_size)
        self.N_embed = nn.Embedding(len(define_rule.N._init_grammar()), args.action_embed_size)
        self.read_out_act = F.tanh if args.readout == 'non_linear' else nn_utils.identity
        self.query_vec_to_action_embed = nn.Linear(args.att_vec_size, args.action_embed_size,
                                                   bias=args.readout == 'non_linear')
        # Scores productions by projecting the attentional vector into the
        # production-embedding space (tied weights with production_embed).
        self.production_readout = lambda q: F.linear(self.read_out_act(self.query_vec_to_action_embed(q)),
                                                     self.production_embed.weight, self.production_readout_b)
        self.q_att = nn.Linear(args.hidden_size, args.embed_size)
        self.column_rnn_input = nn.Linear(args.col_embed_size, args.action_embed_size, bias=False)
        self.table_rnn_input = nn.Linear(args.col_embed_size, args.action_embed_size, bias=False)
        self.dropout = nn.Dropout(args.dropout)
        # Pointer networks for column (C) and table (T) selection.
        self.column_pointer_net = PointerNet(args.hidden_size, args.col_embed_size, attention_type=args.column_att)
        self.table_pointer_net = PointerNet(args.hidden_size, args.col_embed_size, attention_type=args.column_att)
        # Initialize the embedding layers with Xavier normal.
        nn.init.xavier_normal_(self.production_embed.weight.data)
        nn.init.xavier_normal_(self.type_embed.weight.data)
        nn.init.xavier_normal_(self.N_embed.weight.data)
        print('Use Column Pointer: ', True if self.use_column_pointer else False)
def forward(self, examples):
args = self.args
# now should implement the examples
batch = Batch(examples, self.grammar, cuda=self.args.cuda)
table_appear_mask = batch.table_appear_mask
src_encodings, (last_state, last_cell) = self.encode(batch.src_sents, batch.src_sents_len, None)
src_encodings = self.dropout(src_encodings)
utterance_encodings_sketch_linear = self.att_sketch_linear(src_encodings)
utterance_encodings_lf_linear = self.att_lf_linear(src_encodings)
dec_init_vec = self.init_decoder_state(last_cell)
h_tm1 = dec_init_vec
action_probs = [[] for _ in examples]
zero_action_embed = Variable(self.new_tensor(args.action_embed_size).zero_())
zero_type_embed = Variable(self.new_tensor(args.type_embed_size).zero_())
sketch_attention_history = list()
for t in range(batch.max_sketch_num):
if t == 0:
x = Variable(self.new_tensor(len(batch), self.sketch_decoder_lstm.input_size).zero_(),
requires_grad=False)
else:
a_tm1_embeds = []
pre_types = []
for e_id, example in enumerate(examples):
if t < len(example.sketch):
# get the last action
# This is the action embedding
action_tm1 = example.sketch[t - 1]
if type(action_tm1) in [define_rule.Root1,
define_rule.Root,
define_rule.Sel,
define_rule.Filter,
define_rule.Sup,
define_rule.N,
define_rule.Order]:
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
else:
print(action_tm1, 'only for sketch')
quit()
a_tm1_embed = zero_action_embed
pass
else:
a_tm1_embed = zero_action_embed
a_tm1_embeds.append(a_tm1_embed)
a_tm1_embeds = torch.stack(a_tm1_embeds)
inputs = [a_tm1_embeds]
for e_id, example in enumerate(examples):
if t < len(example.sketch):
action_tm = example.sketch[t - 1]
pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
else:
pre_type = zero_type_embed
pre_types.append(pre_type)
pre_types = torch.stack(pre_types)
inputs.append(att_tm1)
inputs.append(pre_types)
x = torch.cat(inputs, dim=-1)
src_mask = batch.src_token_mask
(h_t, cell_t), att_t, aw = self.step(x, h_tm1, src_encodings,
utterance_encodings_sketch_linear, self.sketch_decoder_lstm,
self.sketch_att_vec_linear,
src_token_mask=src_mask, return_att_weight=True)
sketch_attention_history.append(att_t)
# get the Root possibility
apply_rule_prob = F.softmax(self.production_readout(att_t), dim=-1)
for e_id, example in enumerate(examples):
if t < len(example.sketch):
action_t = example.sketch[t]
act_prob_t_i = apply_rule_prob[e_id, self.grammar.prod2id[action_t.production]]
action_probs[e_id].append(act_prob_t_i)
h_tm1 = (h_t, cell_t)
att_tm1 = att_t
sketch_prob_var = torch.stack(
[torch.stack(action_probs_i, dim=0).log().sum() for action_probs_i in action_probs], dim=0)
table_embedding = self.gen_x_batch(batch.table_sents)
src_embedding = self.gen_x_batch(batch.src_sents)
schema_embedding = self.gen_x_batch(batch.table_names)
# get emb differ
embedding_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=table_embedding,
table_unk_mask=batch.table_unk_mask)
schema_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=schema_embedding,
table_unk_mask=batch.schema_token_mask)
tab_ctx = (src_encodings.unsqueeze(1) * embedding_differ.unsqueeze(3)).sum(2)
schema_ctx = (src_encodings.unsqueeze(1) * schema_differ.unsqueeze(3)).sum(2)
table_embedding = table_embedding + tab_ctx
schema_embedding = schema_embedding + schema_ctx
col_type = self.input_type(batch.col_hot_type)
col_type_var = self.col_type(col_type)
table_embedding = table_embedding + col_type_var
batch_table_dict = batch.col_table_dict
table_enable = np.zeros(shape=(len(examples)))
action_probs = [[] for _ in examples]
h_tm1 = dec_init_vec
for t in range(batch.max_action_num):
if t == 0:
# x = self.lf_begin_vec.unsqueeze(0).repeat(len(batch), 1)
x = Variable(self.new_tensor(len(batch), self.lf_decoder_lstm.input_size).zero_(), requires_grad=False)
else:
a_tm1_embeds = []
pre_types = []
for e_id, example in enumerate(examples):
if t < len(example.tgt_actions):
action_tm1 = example.tgt_actions[t - 1]
if type(action_tm1) in [define_rule.Root1,
define_rule.Root,
define_rule.Sel,
define_rule.Filter,
define_rule.Sup,
define_rule.N,
define_rule.Order,
]:
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
else:
if isinstance(action_tm1, define_rule.C):
a_tm1_embed = self.column_rnn_input(table_embedding[e_id, action_tm1.id_c])
elif isinstance(action_tm1, define_rule.T):
a_tm1_embed = self.column_rnn_input(schema_embedding[e_id, action_tm1.id_c])
elif isinstance(action_tm1, define_rule.A):
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
else:
print(action_tm1, 'not implement')
quit()
a_tm1_embed = zero_action_embed
pass
else:
a_tm1_embed = zero_action_embed
a_tm1_embeds.append(a_tm1_embed)
a_tm1_embeds = torch.stack(a_tm1_embeds)
inputs = [a_tm1_embeds]
# tgt t-1 action type
for e_id, example in enumerate(examples):
if t < len(example.tgt_actions):
action_tm = example.tgt_actions[t - 1]
pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
else:
pre_type = zero_type_embed
pre_types.append(pre_type)
pre_types = torch.stack(pre_types)
inputs.append(att_tm1)
inputs.append(pre_types)
x = torch.cat(inputs, dim=-1)
src_mask = batch.src_token_mask
(h_t, cell_t), att_t, aw = self.step(x, h_tm1, src_encodings,
utterance_encodings_lf_linear, self.lf_decoder_lstm,
self.lf_att_vec_linear,
src_token_mask=src_mask, return_att_weight=True)
apply_rule_prob = F.softmax(self.production_readout(att_t), dim=-1)
table_appear_mask_val = torch.from_numpy(table_appear_mask)
if self.cuda:
table_appear_mask_val = table_appear_mask_val.cuda()
if self.use_column_pointer:
gate = F.sigmoid(self.prob_att(att_t))
weights = self.column_pointer_net(src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None) * table_appear_mask_val * gate + self.column_pointer_net(
src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None) * (1 - table_appear_mask_val) * (1 - gate)
else:
weights = self.column_pointer_net(src_encodings=table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=batch.table_token_mask)
weights.data.masked_fill_(batch.table_token_mask.bool(), -float('inf'))
column_attention_weights = F.softmax(weights, dim=-1)
table_weights = self.table_pointer_net(src_encodings=schema_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None)
schema_token_mask = batch.schema_token_mask.expand_as(table_weights)
table_weights.data.masked_fill_(schema_token_mask.bool(), -float('inf'))
table_dict = [batch_table_dict[x_id][int(x)] for x_id, x in enumerate(table_enable.tolist())]
table_mask = batch.table_dict_mask(table_dict)
table_weights.data.masked_fill_(table_mask.bool(), -float('inf'))
table_weights = F.softmax(table_weights, dim=-1)
# now get the loss
for e_id, example in enumerate(examples):
if t < len(example.tgt_actions):
action_t = example.tgt_actions[t]
if isinstance(action_t, define_rule.C):
table_appear_mask[e_id, action_t.id_c] = 1
table_enable[e_id] = action_t.id_c
act_prob_t_i = column_attention_weights[e_id, action_t.id_c]
action_probs[e_id].append(act_prob_t_i)
elif isinstance(action_t, define_rule.T):
act_prob_t_i = table_weights[e_id, action_t.id_c]
action_probs[e_id].append(act_prob_t_i)
elif isinstance(action_t, define_rule.A):
act_prob_t_i = apply_rule_prob[e_id, self.grammar.prod2id[action_t.production]]
action_probs[e_id].append(act_prob_t_i)
else:
pass
h_tm1 = (h_t, cell_t)
att_tm1 = att_t
lf_prob_var = torch.stack(
[torch.stack(action_probs_i, dim=0).log().sum() for action_probs_i in action_probs], dim=0)
return [sketch_prob_var, lf_prob_var]
def parse(self, examples, beam_size=5):
batch = Batch([examples], self.grammar, cuda=self.args.cuda)
src_encodings, (last_state, last_cell) = self.encode(batch.src_sents, batch.src_sents_len, None)
src_encodings = self.dropout(src_encodings)
utterance_encodings_sketch_linear = self.att_sketch_linear(src_encodings)
utterance_encodings_lf_linear = self.att_lf_linear(src_encodings)
dec_init_vec = self.init_decoder_state(last_cell)
h_tm1 = dec_init_vec
t = 0
beams = [Beams(is_sketch=True)]
completed_beams = []
while len(completed_beams) < beam_size and t < self.args.decode_max_time_step:
hyp_num = len(beams)
exp_src_enconding = src_encodings.expand(hyp_num, src_encodings.size(1),
src_encodings.size(2))
exp_src_encodings_sketch_linear = utterance_encodings_sketch_linear.expand(hyp_num,
utterance_encodings_sketch_linear.size(
1),
utterance_encodings_sketch_linear.size(
2))
if t == 0:
with torch.no_grad():
x = Variable(self.new_tensor(1, self.sketch_decoder_lstm.input_size).zero_())
else:
a_tm1_embeds = []
pre_types = []
for e_id, hyp in enumerate(beams):
action_tm1 = hyp.actions[-1]
if type(action_tm1) in [define_rule.Root1,
define_rule.Root,
define_rule.Sel,
define_rule.Filter,
define_rule.Sup,
define_rule.N,
define_rule.Order]:
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
else:
raise ValueError('unknown action %s' % action_tm1)
a_tm1_embeds.append(a_tm1_embed)
a_tm1_embeds = torch.stack(a_tm1_embeds)
inputs = [a_tm1_embeds]
for e_id, hyp in enumerate(beams):
action_tm = hyp.actions[-1]
pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
pre_types.append(pre_type)
pre_types = torch.stack(pre_types)
inputs.append(att_tm1)
inputs.append(pre_types)
x = torch.cat(inputs, dim=-1)
(h_t, cell_t), att_t = self.step(x, h_tm1, exp_src_enconding,
exp_src_encodings_sketch_linear, self.sketch_decoder_lstm,
self.sketch_att_vec_linear,
src_token_mask=None)
apply_rule_log_prob = F.log_softmax(self.production_readout(att_t), dim=-1)
new_hyp_meta = []
for hyp_id, hyp in enumerate(beams):
action_class = hyp.get_availableClass()
if action_class in [define_rule.Root1,
define_rule.Root,
define_rule.Sel,
define_rule.Filter,
define_rule.Sup,
define_rule.N,
define_rule.Order]:
possible_productions = self.grammar.get_production(action_class)
for possible_production in possible_productions:
prod_id = self.grammar.prod2id[possible_production]
prod_score = apply_rule_log_prob[hyp_id, prod_id]
new_hyp_score = hyp.score + prod_score.data.cpu()
meta_entry = {'action_type': action_class, 'prod_id': prod_id,
'score': prod_score, 'new_hyp_score': new_hyp_score,
'prev_hyp_id': hyp_id}
new_hyp_meta.append(meta_entry)
else:
raise RuntimeError('No right action class')
if not new_hyp_meta: break
new_hyp_scores = torch.stack([x['new_hyp_score'] for x in new_hyp_meta], dim=0)
top_new_hyp_scores, meta_ids = torch.topk(new_hyp_scores,
k=min(new_hyp_scores.size(0),
beam_size - len(completed_beams)))
live_hyp_ids = []
new_beams = []
for new_hyp_score, meta_id in zip(top_new_hyp_scores.data.cpu(), meta_ids.data.cpu()):
action_info = ActionInfo()
hyp_meta_entry = new_hyp_meta[meta_id]
prev_hyp_id = hyp_meta_entry['prev_hyp_id']
prev_hyp = beams[prev_hyp_id]
action_type_str = hyp_meta_entry['action_type']
prod_id = hyp_meta_entry['prod_id']
if prod_id < len(self.grammar.id2prod):
production = self.grammar.id2prod[prod_id]
action = action_type_str(list(action_type_str._init_grammar()).index(production))
else:
raise NotImplementedError
action_info.action = action
action_info.t = t
action_info.score = hyp_meta_entry['score']
new_hyp = prev_hyp.clone_and_apply_action_info(action_info)
new_hyp.score = new_hyp_score
new_hyp.inputs.extend(prev_hyp.inputs)
if new_hyp.is_valid is False:
continue
if new_hyp.completed:
completed_beams.append(new_hyp)
else:
new_beams.append(new_hyp)
live_hyp_ids.append(prev_hyp_id)
if live_hyp_ids:
h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])
att_tm1 = att_t[live_hyp_ids]
beams = new_beams
t += 1
else:
break
# now get the sketch result
completed_beams.sort(key=lambda hyp: -hyp.score)
if len(completed_beams) == 0:
return [[], []]
sketch_actions = completed_beams[0].actions
# sketch_actions = examples.sketch
padding_sketch = self.padding_sketch(sketch_actions)
table_embedding = self.gen_x_batch(batch.table_sents)
src_embedding = self.gen_x_batch(batch.src_sents)
schema_embedding = self.gen_x_batch(batch.table_names)
# get emb differ
embedding_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=table_embedding,
table_unk_mask=batch.table_unk_mask)
schema_differ = self.embedding_cosine(src_embedding=src_embedding, table_embedding=schema_embedding,
table_unk_mask=batch.schema_token_mask)
tab_ctx = (src_encodings.unsqueeze(1) * embedding_differ.unsqueeze(3)).sum(2)
schema_ctx = (src_encodings.unsqueeze(1) * schema_differ.unsqueeze(3)).sum(2)
table_embedding = table_embedding + tab_ctx
schema_embedding = schema_embedding + schema_ctx
col_type = self.input_type(batch.col_hot_type)
col_type_var = self.col_type(col_type)
table_embedding = table_embedding + col_type_var
batch_table_dict = batch.col_table_dict
h_tm1 = dec_init_vec
t = 0
beams = [Beams(is_sketch=False)]
completed_beams = []
while len(completed_beams) < beam_size and t < self.args.decode_max_time_step:
hyp_num = len(beams)
# expand value
exp_src_encodings = src_encodings.expand(hyp_num, src_encodings.size(1),
src_encodings.size(2))
exp_utterance_encodings_lf_linear = utterance_encodings_lf_linear.expand(hyp_num,
utterance_encodings_lf_linear.size(
1),
utterance_encodings_lf_linear.size(
2))
exp_table_embedding = table_embedding.expand(hyp_num, table_embedding.size(1),
table_embedding.size(2))
exp_schema_embedding = schema_embedding.expand(hyp_num, schema_embedding.size(1),
schema_embedding.size(2))
table_appear_mask = batch.table_appear_mask
table_appear_mask = np.zeros((hyp_num, table_appear_mask.shape[1]), dtype=np.float32)
table_enable = np.zeros(shape=(hyp_num))
for e_id, hyp in enumerate(beams):
for act in hyp.actions:
if type(act) == define_rule.C:
table_appear_mask[e_id][act.id_c] = 1
table_enable[e_id] = act.id_c
if t == 0:
with torch.no_grad():
x = Variable(self.new_tensor(1, self.lf_decoder_lstm.input_size).zero_())
else:
a_tm1_embeds = []
pre_types = []
for e_id, hyp in enumerate(beams):
action_tm1 = hyp.actions[-1]
if type(action_tm1) in [define_rule.Root1,
define_rule.Root,
define_rule.Sel,
define_rule.Filter,
define_rule.Sup,
define_rule.N,
define_rule.Order]:
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
hyp.sketch_step += 1
elif isinstance(action_tm1, define_rule.C):
a_tm1_embed = self.column_rnn_input(table_embedding[0, action_tm1.id_c])
elif isinstance(action_tm1, define_rule.T):
a_tm1_embed = self.column_rnn_input(schema_embedding[0, action_tm1.id_c])
elif isinstance(action_tm1, define_rule.A):
a_tm1_embed = self.production_embed.weight[self.grammar.prod2id[action_tm1.production]]
else:
raise ValueError('unknown action %s' % action_tm1)
a_tm1_embeds.append(a_tm1_embed)
a_tm1_embeds = torch.stack(a_tm1_embeds)
inputs = [a_tm1_embeds]
for e_id, hyp in enumerate(beams):
action_tm = hyp.actions[-1]
pre_type = self.type_embed.weight[self.grammar.type2id[type(action_tm)]]
pre_types.append(pre_type)
pre_types = torch.stack(pre_types)
inputs.append(att_tm1)
inputs.append(pre_types)
x = torch.cat(inputs, dim=-1)
(h_t, cell_t), att_t = self.step(x, h_tm1, exp_src_encodings,
exp_utterance_encodings_lf_linear, self.lf_decoder_lstm,
self.lf_att_vec_linear,
src_token_mask=None)
apply_rule_log_prob = F.log_softmax(self.production_readout(att_t), dim=-1)
table_appear_mask_val = torch.from_numpy(table_appear_mask)
if self.args.cuda: table_appear_mask_val = table_appear_mask_val.cuda()
if self.use_column_pointer:
gate = F.sigmoid(self.prob_att(att_t))
weights = self.column_pointer_net(src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None) * table_appear_mask_val * gate + self.column_pointer_net(
src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None) * (1 - table_appear_mask_val) * (1 - gate)
# weights = weights + self.col_attention_out(exp_embedding_differ).squeeze()
else:
weights = self.column_pointer_net(src_encodings=exp_table_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=batch.table_token_mask)
# weights.data.masked_fill_(exp_col_pred_mask, -float('inf'))
column_selection_log_prob = F.log_softmax(weights, dim=-1)
table_weights = self.table_pointer_net(src_encodings=exp_schema_embedding, query_vec=att_t.unsqueeze(0),
src_token_mask=None)
# table_weights = self.table_pointer_net(src_encodings=exp_schema_embedding, query_vec=att_t.unsqueeze(0), src_token_mask=None)
schema_token_mask = batch.schema_token_mask.expand_as(table_weights)
table_weights.data.masked_fill_(schema_token_mask.bool(), -float('inf'))
table_dict = [batch_table_dict[0][int(x)] for x_id, x in enumerate(table_enable.tolist())]
table_mask = batch.table_dict_mask(table_dict)
table_weights.data.masked_fill_(table_mask.bool(), -float('inf'))
table_weights = F.log_softmax(table_weights, dim=-1)
new_hyp_meta = []
for hyp_id, hyp in enumerate(beams):
# TODO: should change this
if type(padding_sketch[t]) == define_rule.A:
possible_productions = self.grammar.get_production(define_rule.A)
for possible_production in possible_productions:
prod_id = self.grammar.prod2id[possible_production]
prod_score = apply_rule_log_prob[hyp_id, prod_id]
new_hyp_score = hyp.score + prod_score.data.cpu()
meta_entry = {'action_type': define_rule.A, 'prod_id': prod_id,
'score': prod_score, 'new_hyp_score': new_hyp_score,
'prev_hyp_id': hyp_id}
new_hyp_meta.append(meta_entry)
elif type(padding_sketch[t]) == define_rule.C:
for col_id, _ in enumerate(batch.table_sents[0]):
col_sel_score = column_selection_log_prob[hyp_id, col_id]
new_hyp_score = hyp.score + col_sel_score.data.cpu()
meta_entry = {'action_type': define_rule.C, 'col_id': col_id,
'score': col_sel_score, 'new_hyp_score': new_hyp_score,
'prev_hyp_id': hyp_id}
new_hyp_meta.append(meta_entry)
elif type(padding_sketch[t]) == define_rule.T:
for t_id, _ in enumerate(batch.table_names[0]):
t_sel_score = table_weights[hyp_id, t_id]
new_hyp_score = hyp.score + t_sel_score.data.cpu()
meta_entry = {'action_type': define_rule.T, 't_id': t_id,
'score': t_sel_score, 'new_hyp_score': new_hyp_score,
'prev_hyp_id': hyp_id}
new_hyp_meta.append(meta_entry)
else:
prod_id = self.grammar.prod2id[padding_sketch[t].production]
new_hyp_score = hyp.score + torch.tensor(0.0)
meta_entry = {'action_type': type(padding_sketch[t]), 'prod_id': prod_id,
'score': torch.tensor(0.0), 'new_hyp_score': new_hyp_score,
'prev_hyp_id': hyp_id}
new_hyp_meta.append(meta_entry)
if not new_hyp_meta: break
new_hyp_scores = torch.stack([x['new_hyp_score'] for x in new_hyp_meta], dim=0)
top_new_hyp_scores, meta_ids = torch.topk(new_hyp_scores,
k=min(new_hyp_scores.size(0),
beam_size - len(completed_beams)))
live_hyp_ids = []
new_beams = []
for new_hyp_score, meta_id in zip(top_new_hyp_scores.data.cpu(), meta_ids.data.cpu()):
action_info = ActionInfo()
hyp_meta_entry = new_hyp_meta[meta_id]
prev_hyp_id = hyp_meta_entry['prev_hyp_id']
prev_hyp = beams[prev_hyp_id]
action_type_str = hyp_meta_entry['action_type']
if 'prod_id' in hyp_meta_entry:
prod_id = hyp_meta_entry['prod_id']
if action_type_str == define_rule.C:
col_id = hyp_meta_entry['col_id']
action = define_rule.C(col_id)
elif action_type_str == define_rule.T:
t_id = hyp_meta_entry['t_id']
action = define_rule.T(t_id)
elif prod_id < len(self.grammar.id2prod):
production = self.grammar.id2prod[prod_id]
action = action_type_str(list(action_type_str._init_grammar()).index(production))
else:
raise NotImplementedError
action_info.action = action
action_info.t = t
action_info.score = hyp_meta_entry['score']
new_hyp = prev_hyp.clone_and_apply_action_info(action_info)
new_hyp.score = new_hyp_score
new_hyp.inputs.extend(prev_hyp.inputs)
if new_hyp.is_valid is False:
continue
if new_hyp.completed:
completed_beams.append(new_hyp)
else:
new_beams.append(new_hyp)
live_hyp_ids.append(prev_hyp_id)
if live_hyp_ids:
h_tm1 = (h_t[live_hyp_ids], cell_t[live_hyp_ids])
att_tm1 = att_t[live_hyp_ids]
beams = new_beams
t += 1
else:
break
completed_beams.sort(key=lambda hyp: -hyp.score)
return [completed_beams, sketch_actions]
def step(self, x, h_tm1, src_encodings, src_encodings_att_linear, decoder, attention_func, src_token_mask=None,
return_att_weight=False):
# h_t: (batch_size, hidden_size)
h_t, cell_t = decoder(x, h_tm1)
ctx_t, alpha_t = nn_utils.dot_prod_attention(h_t,
src_encodings, src_encodings_att_linear,
mask=src_token_mask)
att_t = F.tanh(attention_func(torch.cat([h_t, ctx_t], 1)))
att_t = self.dropout(att_t)
if return_att_weight:
return (h_t, cell_t), att_t, alpha_t
else:
return (h_t, cell_t), att_t
def init_decoder_state(self, enc_last_cell):
h_0 = self.decoder_cell_init(enc_last_cell)
h_0 = F.tanh(h_0)
return h_0, Variable(self.new_tensor(h_0.size()).zero_())
| true | true |
1c32dbb26c5581a55894974e838f48581581ce9e | 20,395 | py | Python | cnns/foolbox/foolbox_2_3_0/attacks/hop_skip_jump_attack.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 12 | 2021-07-27T07:18:24.000Z | 2022-03-09T13:52:20.000Z | cnns/foolbox/foolbox_2_3_0/attacks/hop_skip_jump_attack.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 2 | 2021-08-03T09:21:33.000Z | 2021-12-29T14:25:30.000Z | cnns/foolbox/foolbox_2_3_0/attacks/hop_skip_jump_attack.py | anonymous-user-commits/perturb-net | 66fc7c4a1234fa34b92bcc85751f0a6e23d80a23 | [
"MIT"
] | 3 | 2021-11-18T14:46:40.000Z | 2022-01-03T15:47:23.000Z | import warnings
import time
import sys
from .base import Attack
from .base import generator_decorator
from ..distances import MSE, Linf
from ..criteria import Misclassification
import numpy as np
import math
from warnings import warn
import logging
class HopSkipJumpAttack(Attack):
"""A powerful adversarial attack that requires neither gradients
nor probabilities.
Notes
-----
Features:
* ability to switch between two types of distances: MSE and Linf.
* ability to continue previous attacks by passing an instance of the
Adversarial class
* ability to pass an explicit starting point; especially to initialize
a targeted attack
* ability to pass an alternative attack used for initialization
* ability to specify the batch size
References
----------
..
HopSkipJumpAttack was originally proposed by Chen, Jordan and
Wainwright.
It is a decision-based attack that requires access to output
labels of a model alone.
Paper link: https://arxiv.org/abs/1904.02144
The implementation in Foolbox is based on Boundary Attack.
"""
@generator_decorator
def as_generator(
self,
a,
iterations=64,
initial_num_evals=100,
max_num_evals=10000,
stepsize_search="geometric_progression",
gamma=1.0,
starting_point=None,
batch_size=256,
internal_dtype=np.float64,
log_every_n_steps=None,
loggingLevel=logging.WARNING,
):
"""Applies HopSkipJumpAttack.
Parameters
----------
input_or_adv : `numpy.ndarray` or :class:`Adversarial`
The original, correctly classified input. If it is a
numpy array, label must be passed as well. If it is
an :class:`Adversarial` instance, label must not be passed.
label : int
The reference label of the original input. Must be passed
if input is a numpy array, must not be passed if input is
an :class:`Adversarial` instance.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
iterations : int
Number of iterations to run.
initial_num_evals: int
Initial number of evaluations for gradient estimation.
Larger initial_num_evals increases time efficiency, but
may decrease query efficiency.
max_num_evals: int
Maximum number of evaluations for gradient estimation.
stepsize_search: str
How to search for stepsize; choices are 'geometric_progression',
'grid_search'. 'geometric progression' initializes the stepsize
by ||x_t - x||_p / sqrt(iteration), and keep decreasing by half
until reaching the target side of the boundary. 'grid_search'
chooses the optimal epsilon over a grid, in the scale of
||x_t - x||_p.
gamma: float
The binary search threshold theta is gamma / d^1.5 for
l2 attack and gamma / d^2 for linf attack.
starting_point : `numpy.ndarray`
Adversarial input to use as a starting point, required
for targeted attacks.
batch_size : int
Batch size for model prediction.
internal_dtype : np.float32 or np.float64
Higher precision might be slower but is numerically more stable.
log_every_n_steps : int
Determines verbositity of the logging.
loggingLevel : int
Controls the verbosity of the logging, e.g. logging.INFO
or logging.WARNING.
"""
self.initial_num_evals = initial_num_evals
self.max_num_evals = max_num_evals
self.stepsize_search = stepsize_search
self.gamma = gamma
self.batch_size = batch_size
self._starting_point = starting_point
self.internal_dtype = internal_dtype
self.log_every_n_steps = log_every_n_steps
self.logger = logging.getLogger("BoundaryAttack")
self.logger.setLevel(loggingLevel)
# Set constraint based on the distance.
if self._default_distance == MSE:
self.constraint = "l2"
elif self._default_distance == Linf:
self.constraint = "linf"
# Set binary search threshold.
self.shape = a.unperturbed.shape
self.d = np.prod(self.shape)
if self.constraint == "l2":
self.theta = self.gamma / (np.sqrt(self.d) * self.d)
else:
self.theta = self.gamma / (self.d * self.d)
logging.info(
"HopSkipJumpAttack optimized for {} distance".format(self.constraint)
)
yield from self.attack(a, iterations=iterations)
    def attack(self, a, iterations):
        """Run the main HopSkipJump loop on the adversarial instance ``a``.

        Finds (or reuses) an adversarial starting point, projects it onto the
        decision boundary via binary search, then alternates gradient-direction
        estimation, step-size search and re-projection. Progress is recorded on
        ``a`` through its ``forward`` calls; this method is a generator driven
        by the framework via ``yield from``.

        Parameters
        ----------
        a : :class:`Adversarial`
            Adversarial instance wrapping the original input and the model.
        iterations : int
            Maximum number of iterations to run.
        """
        self.t_initial = time.time()
        # ===========================================================
        # Increase floating point precision
        # ===========================================================
        self.external_dtype = a.unperturbed.dtype
        assert self.internal_dtype in [np.float32, np.float64]
        assert self.external_dtype in [np.float32, np.float64]
        # never lose precision: do not run float64 inputs in float32 internally
        assert not (
            self.external_dtype == np.float64 and self.internal_dtype == np.float32
        )
        a.set_distance_dtype(self.internal_dtype)
        # ===========================================================
        # Construct batch decision function with binary output.
        # ===========================================================
        # decision_function = lambda x: a.forward(
        #     x.astype(self.external_dtype), strict=False)[1]
        def decision_function(x):
            # Query the model in batches of self.batch_size; returns the
            # concatenated boolean "is adversarial" decisions for all of x.
            outs = []
            num_batchs = int(math.ceil(len(x) * 1.0 / self.batch_size))
            for j in range(num_batchs):
                current_batch = x[self.batch_size * j : self.batch_size * (j + 1)]
                current_batch = current_batch.astype(self.external_dtype)
                _, out = yield from a.forward(current_batch, strict=False)
                outs.append(out)
            outs = np.concatenate(outs, axis=0)
            return outs
        # ===========================================================
        # initialize time measurements
        # ===========================================================
        self.time_gradient_estimation = 0
        self.time_search = 0
        self.time_initialization = 0
        # ===========================================================
        # Initialize variables, constants, hyperparameters, etc.
        # ===========================================================
        # make sure repeated warnings are shown
        warnings.simplefilter("always", UserWarning)
        # get bounds
        bounds = a.bounds()
        self.clip_min, self.clip_max = bounds
        # ===========================================================
        # Find starting point
        # ===========================================================
        yield from self.initialize_starting_point(a)
        if a.perturbed is None:
            warnings.warn(
                "Initialization failed."
                " it might be necessary to pass an explicit starting"
                " point."
            )
            return
        self.time_initialization += time.time() - self.t_initial
        assert a.perturbed.dtype == self.external_dtype
        # get original and starting point in the right format
        original = a.unperturbed.astype(self.internal_dtype)
        perturbed = a.perturbed.astype(self.internal_dtype)
        # ===========================================================
        # Iteratively refine adversarial
        # ===========================================================
        t0 = time.time()
        # Project the initialization to the boundary.
        perturbed, dist_post_update = yield from self.binary_search_batch(
            original, np.expand_dims(perturbed, 0), decision_function
        )
        dist = self.compute_distance(perturbed, original)
        distance = a.distance.value
        self.time_search += time.time() - t0
        # log starting point
        self.log_step(0, distance)
        for step in range(1, iterations + 1):
            t0 = time.time()
            # ===========================================================
            # Gradient direction estimation.
            # ===========================================================
            # Choose delta.
            delta = self.select_delta(dist_post_update, step)
            # Choose number of evaluations (grows with sqrt(step), capped).
            num_evals = int(
                min([self.initial_num_evals * np.sqrt(step), self.max_num_evals])
            )
            # approximate gradient.
            gradf = yield from self.approximate_gradient(
                decision_function, perturbed, num_evals, delta
            )
            if self.constraint == "linf":
                update = np.sign(gradf)
            else:
                update = gradf
            t1 = time.time()
            self.time_gradient_estimation += t1 - t0
            # ===========================================================
            # Update, and binary search back to the boundary.
            # ===========================================================
            if self.stepsize_search == "geometric_progression":
                # find step size.
                epsilon = yield from self.geometric_progression_for_stepsize(
                    perturbed, update, dist, decision_function, step
                )
                # Update the sample.
                perturbed = np.clip(
                    perturbed + epsilon * update, self.clip_min, self.clip_max
                )
                # Binary search to return to the boundary.
                perturbed, dist_post_update = yield from self.binary_search_batch(
                    original, perturbed[None], decision_function
                )
            elif self.stepsize_search == "grid_search":
                # Grid search for stepsize.
                epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist
                epsilons_shape = [20] + len(self.shape) * [1]
                perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update
                perturbeds = np.clip(perturbeds, self.clip_min, self.clip_max)
                idx_perturbed = yield from decision_function(perturbeds)
                if np.sum(idx_perturbed) > 0:
                    # Select the perturbation that yields the minimum
                    # distance after binary search.
                    perturbed, dist_post_update = yield from self.binary_search_batch(
                        original, perturbeds[idx_perturbed], decision_function
                    )
            t2 = time.time()
            self.time_search += t2 - t1
            # compute new distance.
            dist = self.compute_distance(perturbed, original)
            # ===========================================================
            # Log the step
            # ===========================================================
            # Using foolbox definition of distance for logging.
            if self.constraint == "l2":
                distance = dist ** 2 / self.d / (self.clip_max - self.clip_min) ** 2
            elif self.constraint == "linf":
                distance = dist / (self.clip_max - self.clip_min)
            message = " (took {:.5f} seconds)".format(t2 - t0)
            self.log_step(step, distance, message)
            sys.stdout.flush()
        # ===========================================================
        # Log overall runtime
        # ===========================================================
        self.log_time()
# ===============================================================
#
# Other methods
#
# ===============================================================
def initialize_starting_point(self, a):
starting_point = self._starting_point
if a.perturbed is not None:
print(
"Attack is applied to a previously found adversarial."
" Continuing search for better adversarials."
)
if starting_point is not None: # pragma: no cover
warnings.warn(
"Ignoring starting_point parameter because the attack"
" is applied to a previously found adversarial."
)
return
if starting_point is not None:
yield from a.forward_one(starting_point)
assert (
a.perturbed is not None
), "Invalid starting point provided. Please provide a starting point that is adversarial."
return
"""
Apply BlendedUniformNoiseAttack if without
initialization.
Efficient Implementation of BlendedUniformNoiseAttack in Foolbox.
"""
success = 0
num_evals = 0
while True:
random_noise = np.random.uniform(
self.clip_min, self.clip_max, size=self.shape
)
_, success = yield from a.forward_one(
random_noise.astype(self.external_dtype)
)
num_evals += 1
if success:
break
if num_evals > 1e4:
return
# Binary search to minimize l2 distance to the original input.
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
blended = (1 - mid) * a.unperturbed + mid * random_noise
_, success = yield from a.forward_one(blended.astype(self.external_dtype))
if success:
high = mid
else:
low = mid
def compute_distance(self, x1, x2):
if self.constraint == "l2":
return np.linalg.norm(x1 - x2)
elif self.constraint == "linf":
return np.max(abs(x1 - x2))
def project(self, unperturbed, perturbed_inputs, alphas):
""" Projection onto given l2 / linf balls in a batch. """
alphas_shape = [len(alphas)] + [1] * len(self.shape)
alphas = alphas.reshape(alphas_shape)
if self.constraint == "l2":
projected = (1 - alphas) * unperturbed + alphas * perturbed_inputs
elif self.constraint == "linf":
projected = np.clip(
perturbed_inputs, unperturbed - alphas, unperturbed + alphas
)
return projected
    def binary_search_batch(self, unperturbed, perturbed_inputs, decision_function):
        """Binary search toward the decision boundary for a batch of inputs.

        For each perturbed input, shrinks the projection factor until the
        projected point sits just on the adversarial side of the boundary,
        then returns the closest resulting input and its pre-search distance.

        This is a generator and must be driven via ``yield from``.

        Parameters
        ----------
        unperturbed : array
            The original input.
        perturbed_inputs : array
            Batch of adversarial candidates.
        decision_function : generator function
            Batched model query returning 1 for adversarial inputs.

        Returns
        -------
        (out, dist)
            The selected boundary input and the distance of the chosen
            candidate *before* this binary search.
        """
        # Compute distance between each of perturbed and unperturbed input.
        dists_post_update = np.array(
            [
                self.compute_distance(unperturbed, perturbed_x)
                for perturbed_x in perturbed_inputs
            ]
        )
        # Choose upper thresholds in binary searches based on constraint.
        if self.constraint == "linf":
            highs = dists_post_update
            # Stopping criteria (per-input, scaled with distance).
            thresholds = dists_post_update * self.theta
        else:
            highs = np.ones(len(perturbed_inputs))
            thresholds = self.theta
        lows = np.zeros(len(perturbed_inputs))
        # Bisect until every interval is below its stopping threshold.
        while np.max((highs - lows) / thresholds) > 1:
            # projection to mids.
            mids = (highs + lows) / 2.0
            mid_inputs = self.project(unperturbed, perturbed_inputs, mids)
            # Update highs and lows based on model decisions.
            decisions = yield from decision_function(mid_inputs)
            lows = np.where(decisions == 0, mids, lows)
            highs = np.where(decisions == 1, mids, highs)
        # highs are the smallest factors that are still adversarial.
        out_inputs = self.project(unperturbed, perturbed_inputs, highs)
        # Compute distance of the output to select the best choice.
        # (only used when stepsize_search is grid_search.)
        dists = np.array(
            [self.compute_distance(unperturbed, out) for out in out_inputs]
        )
        idx = np.argmin(dists)
        dist = dists_post_update[idx]
        out = out_inputs[idx]
        return out, dist
def select_delta(self, dist_post_update, current_iteration):
"""
Choose the delta at the scale of distance
between x and perturbed sample.
"""
if current_iteration == 1:
delta = 0.1 * (self.clip_max - self.clip_min)
else:
if self.constraint == "l2":
delta = np.sqrt(self.d) * self.theta * dist_post_update
elif self.constraint == "linf":
delta = self.d * self.theta * dist_post_update
return delta
def approximate_gradient(self, decision_function, sample, num_evals, delta):
""" Gradient direction estimation """
# Generate random vectors.
noise_shape = [num_evals] + list(self.shape)
if self.constraint == "l2":
rv = np.random.randn(*noise_shape)
elif self.constraint == "linf":
rv = np.random.uniform(low=-1, high=1, size=noise_shape)
axis = tuple(range(1, 1 + len(self.shape)))
rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True))
perturbed = sample + delta * rv
perturbed = np.clip(perturbed, self.clip_min, self.clip_max)
rv = (perturbed - sample) / delta
# query the model.
decisions = yield from decision_function(perturbed)
decision_shape = [len(decisions)] + [1] * len(self.shape)
fval = 2 * decisions.astype(self.internal_dtype).reshape(decision_shape) - 1.0
# Baseline subtraction (when fval differs)
vals = fval if abs(np.mean(fval)) == 1.0 else fval - np.mean(fval)
gradf = np.mean(vals * rv, axis=0)
# Get the gradient direction.
gradf = gradf / np.linalg.norm(gradf)
return gradf
def geometric_progression_for_stepsize(
self, x, update, dist, decision_function, current_iteration
):
""" Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
"""
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = np.clip(x + epsilon * update, self.clip_min, self.clip_max)
success = (yield from decision_function(updated[None]))[0]
if success:
break
else:
epsilon = epsilon / 2.0 # pragma: no cover
return epsilon
def log_step(self, step, distance, message="", always=False):
if self.log_every_n_steps is None or self.log_every_n_steps == np.inf:
return
if not always and step % self.log_every_n_steps != 0:
return
logging.info("Step {}: {:.5e} {}".format(step, distance, message))
def log_time(self):
t_total = time.time() - self.t_initial
rel_initialization = self.time_initialization / t_total
rel_gradient_estimation = self.time_gradient_estimation / t_total
rel_search = self.time_search / t_total
self.printv("Time since beginning: {:.5f}".format(t_total))
self.printv(
" {:2.1f}% for initialization ({:.5f})".format(
rel_initialization * 100, self.time_initialization
)
)
self.printv(
" {:2.1f}% for gradient estimation ({:.5f})".format(
rel_gradient_estimation * 100, self.time_gradient_estimation
)
)
self.printv(
" {:2.1f}% for search ({:.5f})".format(rel_search * 100, self.time_search)
)
    def printv(self, *args, **kwargs):
        """Forward verbose output to the instance logger at INFO level."""
        self.logger.info(*args, **kwargs)
def BoundaryAttackPlusPlus(
    model=None, criterion=Misclassification(), distance=MSE, threshold=None
):
    """Deprecated alias for :class:`HopSkipJumpAttack`.

    Kept for backward compatibility; constructs a HopSkipJumpAttack with
    the given arguments.
    """
    # Use the conventional DeprecationWarning category and point the warning
    # at the caller's line via stacklevel=2.
    warn(
        "BoundaryAttackPlusPlus is deprecated; use HopSkipJumpAttack.",
        DeprecationWarning,
        stacklevel=2,
    )
    return HopSkipJumpAttack(model, criterion, distance, threshold)
| 37.559853 | 102 | 0.54783 | import warnings
import time
import sys
from .base import Attack
from .base import generator_decorator
from ..distances import MSE, Linf
from ..criteria import Misclassification
import numpy as np
import math
from warnings import warn
import logging
class HopSkipJumpAttack(Attack):
@generator_decorator
def as_generator(
self,
a,
iterations=64,
initial_num_evals=100,
max_num_evals=10000,
stepsize_search="geometric_progression",
gamma=1.0,
starting_point=None,
batch_size=256,
internal_dtype=np.float64,
log_every_n_steps=None,
loggingLevel=logging.WARNING,
):
self.initial_num_evals = initial_num_evals
self.max_num_evals = max_num_evals
self.stepsize_search = stepsize_search
self.gamma = gamma
self.batch_size = batch_size
self._starting_point = starting_point
self.internal_dtype = internal_dtype
self.log_every_n_steps = log_every_n_steps
self.logger = logging.getLogger("BoundaryAttack")
self.logger.setLevel(loggingLevel)
if self._default_distance == MSE:
self.constraint = "l2"
elif self._default_distance == Linf:
self.constraint = "linf"
self.shape = a.unperturbed.shape
self.d = np.prod(self.shape)
if self.constraint == "l2":
self.theta = self.gamma / (np.sqrt(self.d) * self.d)
else:
self.theta = self.gamma / (self.d * self.d)
logging.info(
"HopSkipJumpAttack optimized for {} distance".format(self.constraint)
)
yield from self.attack(a, iterations=iterations)
def attack(self, a, iterations):
self.t_initial = time.time()
self.external_dtype = a.unperturbed.dtype
assert self.internal_dtype in [np.float32, np.float64]
assert self.external_dtype in [np.float32, np.float64]
assert not (
self.external_dtype == np.float64 and self.internal_dtype == np.float32
)
a.set_distance_dtype(self.internal_dtype)
def decision_function(x):
outs = []
num_batchs = int(math.ceil(len(x) * 1.0 / self.batch_size))
for j in range(num_batchs):
current_batch = x[self.batch_size * j : self.batch_size * (j + 1)]
current_batch = current_batch.astype(self.external_dtype)
_, out = yield from a.forward(current_batch, strict=False)
outs.append(out)
outs = np.concatenate(outs, axis=0)
return outs
self.time_gradient_estimation = 0
self.time_search = 0
self.time_initialization = 0
warnings.simplefilter("always", UserWarning)
bounds = a.bounds()
self.clip_min, self.clip_max = bounds
yield from self.initialize_starting_point(a)
if a.perturbed is None:
warnings.warn(
"Initialization failed."
" it might be necessary to pass an explicit starting"
" point."
)
return
self.time_initialization += time.time() - self.t_initial
assert a.perturbed.dtype == self.external_dtype
original = a.unperturbed.astype(self.internal_dtype)
perturbed = a.perturbed.astype(self.internal_dtype)
t0 = time.time()
perturbed, dist_post_update = yield from self.binary_search_batch(
original, np.expand_dims(perturbed, 0), decision_function
)
dist = self.compute_distance(perturbed, original)
distance = a.distance.value
self.time_search += time.time() - t0
self.log_step(0, distance)
for step in range(1, iterations + 1):
t0 = time.time()
delta = self.select_delta(dist_post_update, step)
num_evals = int(
min([self.initial_num_evals * np.sqrt(step), self.max_num_evals])
)
gradf = yield from self.approximate_gradient(
decision_function, perturbed, num_evals, delta
)
if self.constraint == "linf":
update = np.sign(gradf)
else:
update = gradf
t1 = time.time()
self.time_gradient_estimation += t1 - t0
if self.stepsize_search == "geometric_progression":
epsilon = yield from self.geometric_progression_for_stepsize(
perturbed, update, dist, decision_function, step
)
perturbed = np.clip(
perturbed + epsilon * update, self.clip_min, self.clip_max
)
perturbed, dist_post_update = yield from self.binary_search_batch(
original, perturbed[None], decision_function
)
elif self.stepsize_search == "grid_search":
epsilons = np.logspace(-4, 0, num=20, endpoint=True) * dist
epsilons_shape = [20] + len(self.shape) * [1]
perturbeds = perturbed + epsilons.reshape(epsilons_shape) * update
perturbeds = np.clip(perturbeds, self.clip_min, self.clip_max)
idx_perturbed = yield from decision_function(perturbeds)
if np.sum(idx_perturbed) > 0:
perturbed, dist_post_update = yield from self.binary_search_batch(
original, perturbeds[idx_perturbed], decision_function
)
t2 = time.time()
self.time_search += t2 - t1
dist = self.compute_distance(perturbed, original)
if self.constraint == "l2":
distance = dist ** 2 / self.d / (self.clip_max - self.clip_min) ** 2
elif self.constraint == "linf":
distance = dist / (self.clip_max - self.clip_min)
message = " (took {:.5f} seconds)".format(t2 - t0)
self.log_step(step, distance, message)
sys.stdout.flush()
self.log_time()
def initialize_starting_point(self, a):
starting_point = self._starting_point
if a.perturbed is not None:
print(
"Attack is applied to a previously found adversarial."
" Continuing search for better adversarials."
)
if starting_point is not None:
warnings.warn(
"Ignoring starting_point parameter because the attack"
" is applied to a previously found adversarial."
)
return
if starting_point is not None:
yield from a.forward_one(starting_point)
assert (
a.perturbed is not None
), "Invalid starting point provided. Please provide a starting point that is adversarial."
return
success = 0
num_evals = 0
while True:
random_noise = np.random.uniform(
self.clip_min, self.clip_max, size=self.shape
)
_, success = yield from a.forward_one(
random_noise.astype(self.external_dtype)
)
num_evals += 1
if success:
break
if num_evals > 1e4:
return
low = 0.0
high = 1.0
while high - low > 0.001:
mid = (high + low) / 2.0
blended = (1 - mid) * a.unperturbed + mid * random_noise
_, success = yield from a.forward_one(blended.astype(self.external_dtype))
if success:
high = mid
else:
low = mid
def compute_distance(self, x1, x2):
if self.constraint == "l2":
return np.linalg.norm(x1 - x2)
elif self.constraint == "linf":
return np.max(abs(x1 - x2))
def project(self, unperturbed, perturbed_inputs, alphas):
alphas_shape = [len(alphas)] + [1] * len(self.shape)
alphas = alphas.reshape(alphas_shape)
if self.constraint == "l2":
projected = (1 - alphas) * unperturbed + alphas * perturbed_inputs
elif self.constraint == "linf":
projected = np.clip(
perturbed_inputs, unperturbed - alphas, unperturbed + alphas
)
return projected
def binary_search_batch(self, unperturbed, perturbed_inputs, decision_function):
dists_post_update = np.array(
[
self.compute_distance(unperturbed, perturbed_x)
for perturbed_x in perturbed_inputs
]
)
if self.constraint == "linf":
highs = dists_post_update
thresholds = dists_post_update * self.theta
else:
highs = np.ones(len(perturbed_inputs))
thresholds = self.theta
lows = np.zeros(len(perturbed_inputs))
while np.max((highs - lows) / thresholds) > 1:
mids = (highs + lows) / 2.0
mid_inputs = self.project(unperturbed, perturbed_inputs, mids)
decisions = yield from decision_function(mid_inputs)
lows = np.where(decisions == 0, mids, lows)
highs = np.where(decisions == 1, mids, highs)
out_inputs = self.project(unperturbed, perturbed_inputs, highs)
dists = np.array(
[self.compute_distance(unperturbed, out) for out in out_inputs]
)
idx = np.argmin(dists)
dist = dists_post_update[idx]
out = out_inputs[idx]
return out, dist
def select_delta(self, dist_post_update, current_iteration):
if current_iteration == 1:
delta = 0.1 * (self.clip_max - self.clip_min)
else:
if self.constraint == "l2":
delta = np.sqrt(self.d) * self.theta * dist_post_update
elif self.constraint == "linf":
delta = self.d * self.theta * dist_post_update
return delta
def approximate_gradient(self, decision_function, sample, num_evals, delta):
noise_shape = [num_evals] + list(self.shape)
if self.constraint == "l2":
rv = np.random.randn(*noise_shape)
elif self.constraint == "linf":
rv = np.random.uniform(low=-1, high=1, size=noise_shape)
axis = tuple(range(1, 1 + len(self.shape)))
rv = rv / np.sqrt(np.sum(rv ** 2, axis=axis, keepdims=True))
perturbed = sample + delta * rv
perturbed = np.clip(perturbed, self.clip_min, self.clip_max)
rv = (perturbed - sample) / delta
decisions = yield from decision_function(perturbed)
decision_shape = [len(decisions)] + [1] * len(self.shape)
fval = 2 * decisions.astype(self.internal_dtype).reshape(decision_shape) - 1.0
vals = fval if abs(np.mean(fval)) == 1.0 else fval - np.mean(fval)
gradf = np.mean(vals * rv, axis=0)
gradf = gradf / np.linalg.norm(gradf)
return gradf
def geometric_progression_for_stepsize(
self, x, update, dist, decision_function, current_iteration
):
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = np.clip(x + epsilon * update, self.clip_min, self.clip_max)
success = (yield from decision_function(updated[None]))[0]
if success:
break
else:
epsilon = epsilon / 2.0
return epsilon
def log_step(self, step, distance, message="", always=False):
if self.log_every_n_steps is None or self.log_every_n_steps == np.inf:
return
if not always and step % self.log_every_n_steps != 0:
return
logging.info("Step {}: {:.5e} {}".format(step, distance, message))
def log_time(self):
t_total = time.time() - self.t_initial
rel_initialization = self.time_initialization / t_total
rel_gradient_estimation = self.time_gradient_estimation / t_total
rel_search = self.time_search / t_total
self.printv("Time since beginning: {:.5f}".format(t_total))
self.printv(
" {:2.1f}% for initialization ({:.5f})".format(
rel_initialization * 100, self.time_initialization
)
)
self.printv(
" {:2.1f}% for gradient estimation ({:.5f})".format(
rel_gradient_estimation * 100, self.time_gradient_estimation
)
)
self.printv(
" {:2.1f}% for search ({:.5f})".format(rel_search * 100, self.time_search)
)
def printv(self, *args, **kwargs):
self.logger.info(*args, **kwargs)
def BoundaryAttackPlusPlus(
    model=None, criterion=Misclassification(), distance=MSE, threshold=None
):
    """Deprecated alias kept for backward compatibility.

    Emits a deprecation warning and forwards all arguments to
    :class:`HopSkipJumpAttack`, which replaced this attack.

    NOTE(review): the `criterion=Misclassification()` default is evaluated
    once at import time, so every call without an explicit criterion shares
    the same instance — harmless only if Misclassification is stateless;
    confirm.
    """
    warn("BoundaryAttackPlusPlus is deprecated; use HopSkipJumpAttack.")
    return HopSkipJumpAttack(model, criterion, distance, threshold)
| true | true |
1c32dd699853256f1e8ca3c43d488c9561e77f45 | 374 | py | Python | Exe13_Dias_da_semana.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
] | null | null | null | Exe13_Dias_da_semana.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
] | null | null | null | Exe13_Dias_da_semana.py | lucaslk122/Exercicios_Python_estutura_decisao | 51a9699c5d85aa6cfb163d891c56e804a7255634 | [
"MIT"
# Map each weekday number (1-7) to its Portuguese name.
DIAS_DA_SEMANA = {
    1: "Domingo",
    2: "Segunda",
    3: "Terça",
    4: "Quarta",
    5: "Quinta",
    6: "Sexta",
    7: "Sabado",
}

print("Digite um numero inteiro, de 1 a 7 que retornarei qual dia da semana é")
dia = int(input())
# dict.get returns the fallback message for any number outside 1-7,
# replacing the original if/elif ladder with a single lookup.
print(DIAS_DA_SEMANA.get(dia, "Dia invalido"))
dia = int(input())
if dia == 1:
print("Domingo")
elif dia == 2:
print("Segunda")
elif dia == 3:
print("Terça")
elif dia == 4:
print("Quarta")
elif dia == 5:
print("Quinta")
elif dia == 6:
print("Sexta")
elif dia == 7:
print("Sabado")
else:
print("Dia invalido") | true | true |
1c32dd85b8535f53247c6abf36aeeb797e6fcb1c | 10,005 | py | Python | mealpy/bio_based/VCS.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | 162 | 2020-08-31T10:13:06.000Z | 2022-03-31T09:38:19.000Z | mealpy/bio_based/VCS.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | 51 | 2020-09-13T10:46:31.000Z | 2022-03-30T06:12:08.000Z | mealpy/bio_based/VCS.py | Alhassan20/mealpy | 7ed365c5c495ad1c1e066662c90159b3d5e9b8e3 | [
"MIT"
] | 58 | 2020-09-12T13:29:18.000Z | 2022-03-31T09:38:21.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 22:07, 11/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import sum, log1p, array, mean, prod, abs, where
from numpy.random import uniform, normal, choice
from copy import deepcopy
from mealpy.optimizer import Root
class BaseVCS(Root):
    """
    My version of: Virus Colony Search (VCS)
        A Novel Nature-inspired Algorithm For Optimization: Virus Colony Search
    Link:
        https://doi.org/10.1016/j.advengsoft.2015.11.004
    Notes:
        + Remove all third loop, make algorithm 10 times faster than original
        + In Immune response process, updating whole position instead of updating each variable in position
        + Drop batch-size idea to 3 main process of this algorithm, make it more robust
        + BUGFIX: viruses diffusion now draws a single sample from
          N(g_best, |sigma|); the previous normal(normal(...)) re-sampled
          around the first draw, silently adding unit-variance noise
          (compare with the per-variable draw in OriginalVCS).
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):
        """
        :param epoch: number of iterations
        :param pop_size: population size
        :param lamda: fraction (if < 1) or absolute count of best individuals
            used for the weighted mean
        :param xichma: weight factor for the host-cells-infection noise
        """
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.xichma = xichma  # Weight factor
        self.lamda = lamda  # Number of the best will keep
        if lamda < 1:
            self.n_best = int(lamda * self.pop_size)
        else:
            self.n_best = int(lamda)

    def _refresh_global_best(self, i, pop, g_best):
        # Batch-size idea: refresh g_best every batch_size individuals when
        # enabled, otherwise only once per full population sweep. Extracted
        # here because the same block appeared verbatim in all three phases.
        step = self.batch_size if self.batch_idea else self.pop_size
        if (i + 1) % step == 0:
            return self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
        return g_best

    def _weighted_mean_best(self, pop):
        # Weighted mean position of the n_best fittest individuals
        # (log-based weights; pop must already be sorted by fitness).
        pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]
        factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))
        weight = log1p(self.n_best + 1) / factor_down
        weight = weight / self.n_best
        return weight * sum(pos_list, axis=0)

    def train(self):
        """Run the VCS optimization loop and return (best position, best fitness, loss history)."""
        pop = [self.create_solution() for _ in range(self.pop_size)]
        pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
        pos_list = [item[self.ID_POS] for item in pop]
        x_mean = mean(pos_list, axis=0)
        for epoch in range(self.epoch):
            ## Viruses diffusion
            for i in range(0, self.pop_size):
                xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])
                # BUGFIX: single draw from N(g_best, |sigma|) (was normal(normal(...))).
                gauss = normal(g_best[self.ID_POS], abs(xichma))
                pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]
                pos_new = self.amend_position_random(pos_new)
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
                g_best = self._refresh_global_best(i, pop, g_best)
            ## Host cells infection
            xichma = self.xichma * (1 - (epoch + 1) / self.epoch)
            for i in range(0, self.pop_size):
                pos_new = x_mean + xichma * normal(0, 1, self.problem_size)  ## Basic / simple version, not the original version in the paper
                pos_new = self.amend_position_random(pos_new)
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
                g_best = self._refresh_global_best(i, pop, g_best)
            ## Calculate the weighted mean of the λ best individuals
            pop = sorted(pop, key=lambda item: item[self.ID_FIT])
            x_mean = self._weighted_mean_best(pop)
            ## Immune response
            for i in range(0, self.pop_size):
                pr = (self.problem_size - i + 1) / self.problem_size
                id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
                temp = pop[id1][self.ID_POS] - (pop[id2][self.ID_POS] - pop[i][self.ID_POS]) * uniform()
                pos_new = deepcopy(pop[i][self.ID_POS])
                # Keep each variable with probability pr, otherwise take the recombined one.
                pos_new = where(uniform(0, 1, self.problem_size) < pr, pos_new, temp)
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
                g_best = self._refresh_global_best(i, pop, g_best)
            ## Update elite if a bower becomes fitter than the elite
            pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class OriginalVCS(Root):
    """
    The original version of: Virus Colony Search (VCS)
        A Novel Nature-inspired Algorithm For Optimization: Virus Colony Search
        - This is basic version, not the full version of the paper
    Link:
        https://doi.org/10.1016/j.advengsoft.2015.11.004
    """

    def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):
        """
        :param epoch: number of iterations
        :param pop_size: population size
        :param lamda: fraction (if < 1) or absolute count of best individuals
            used for the weighted mean
        :param xichma: weight factor for the host-cells-infection noise
        """
        super().__init__(obj_func, lb, ub, verbose, kwargs)
        self.epoch = epoch
        self.pop_size = pop_size
        self.xichma = xichma  # Weight factor
        self.lamda = lamda  # Number of the best will keep
        if lamda < 1:
            self.n_best = int(lamda * self.pop_size)
        else:
            self.n_best = int(lamda)

    def train(self):
        """Run the VCS optimization loop and return (best position, best fitness, loss history)."""
        pop = [self.create_solution() for _ in range(self.pop_size)]
        pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
        pos_list = [item[self.ID_POS] for item in pop]
        x_mean = mean(pos_list, axis=0)
        for epoch in range(self.epoch):
            ## Viruses diffusion: per-variable draw from N(g_best[idx], |sigma[idx]|)
            for i in range(0, self.pop_size):
                xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])
                gauss = array([normal(g_best[self.ID_POS][idx], abs(xichma[idx])) for idx in range(0, self.problem_size)])
                pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]
                pos_new = self.amend_position_random(pos_new)
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
            ## Host cells infection
            xichma = self.xichma * (1 - (epoch + 1) / self.epoch)
            for i in range(0, self.pop_size):
                pos_new = x_mean + xichma * normal(0, 1, self.problem_size)  ## Basic / simple version, not the original version in the paper
                pos_new = self.amend_position_random(pos_new)
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
            ## Calculate the weighted mean of the λ best individuals
            pop = sorted(pop, key=lambda item: item[self.ID_FIT])
            pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]
            factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))
            weight = log1p(self.n_best + 1) / factor_down
            weight = weight / self.n_best
            x_mean = weight * sum(pos_list, axis=0)
            ## Immune response
            for i in range(0, self.pop_size):
                pr = (self.problem_size - i + 1) / self.problem_size
                # BUGFIX: copy before mutating. The original aliased pop[i]'s
                # position, so the per-variable updates below mutated the
                # population in place even when the new fitness was worse,
                # leaving pop[i] with a mutated position but its old fitness.
                # (BaseVCS already used deepcopy here.)
                pos_new = deepcopy(pop[i][self.ID_POS])
                for j in range(0, self.problem_size):
                    if uniform() > pr:
                        id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
                        pos_new[j] = pop[id1][self.ID_POS][j] - (pop[id2][self.ID_POS][j] - pop[i][self.ID_POS][j]) * uniform()
                fit = self.get_fitness_position(pos_new)
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [pos_new, fit]
            ## Update elite if a bower becomes fitter than the elite
            pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| 51.572165 | 148 | 0.53963 |
from numpy import sum, log1p, array, mean, prod, abs, where
from numpy.random import uniform, normal, choice
from copy import deepcopy
from mealpy.optimizer import Root
class BaseVCS(Root):
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.xichma = xichma
self.lamda = lamda
if lamda < 1:
self.n_best = int(lamda * self.pop_size)
else:
self.n_best = int(lamda)
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
pos_list = [item[self.ID_POS] for item in pop]
x_mean = mean(pos_list, axis=0)
for epoch in range(self.epoch):
in range(0, self.pop_size):
xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])
gauss = normal(normal(g_best[self.ID_POS], abs(xichma)))
pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]
pos_new = self.amend_position_random(pos_new)
fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
if self.batch_idea:
if (i + 1) % self.batch_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
else:
if (i + 1) % self.pop_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.xichma * (1 - (epoch + 1) / self.epoch)
for i in range(0, self.pop_size):
pos_new = x_mean + xichma * normal(0, 1, self.problem_size) fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
if self.batch_idea:
if (i + 1) % self.batch_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
else:
if (i + 1) % self.pop_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
ID_FIT])
pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]
factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))
weight = log1p(self.n_best + 1) / factor_down
weight = weight / self.n_best
x_mean = weight * sum(pos_list, axis=0)
i in range(0, self.pop_size):
pr = (self.problem_size - i + 1) / self.problem_size
id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
temp = pop[id1][self.ID_POS] - (pop[id2][self.ID_POS] - pop[i][self.ID_POS]) * uniform()
pos_new = deepcopy(pop[i][self.ID_POS])
pos_new = where(uniform(0, 1, self.problem_size) < pr, pos_new, temp)
fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
if self.batch_idea:
if (i + 1) % self.batch_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
else:
if (i + 1) % self.pop_size == 0:
g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
n_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class OriginalVCS(Root):
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, lamda=0.5, xichma=0.3, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
self.xichma = xichma
self.lamda = lamda
if lamda < 1:
self.n_best = int(lamda * self.pop_size)
else:
self.n_best = int(lamda)
def train(self):
pop = [self.create_solution() for _ in range(self.pop_size)]
pop, g_best = self.get_sorted_pop_and_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
pos_list = [item[self.ID_POS] for item in pop]
x_mean = mean(pos_list, axis=0)
for epoch in range(self.epoch):
in range(0, self.pop_size):
xichma = (log1p(epoch + 1) / self.epoch) * (pop[i][self.ID_POS] - g_best[self.ID_POS])
gauss = array([normal(g_best[self.ID_POS][idx], abs(xichma[idx])) for idx in range(0, self.problem_size)])
pos_new = gauss + uniform() * g_best[self.ID_POS] - uniform() * pop[i][self.ID_POS]
pos_new = self.amend_position_random(pos_new)
fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
self.xichma * (1 - (epoch+1)/self.epoch)
for i in range(0, self.pop_size):
pos_new = x_mean + xichma * normal(0, 1, self.problem_size) fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
ID_FIT])
pos_list = [item[self.ID_POS] for item in pop[:self.n_best]]
factor_down = self.n_best * log1p(self.n_best + 1) - log1p(prod(range(1, self.n_best + 1)))
weight = log1p(self.n_best + 1) / factor_down
weight = weight / self.n_best
x_mean = weight * sum(pos_list, axis=0)
i in range(0, self.pop_size):
pr = (self.problem_size - i + 1) / self.problem_size
pos_new = pop[i][self.ID_POS]
for j in range(0, self.problem_size):
if uniform() > pr:
id1, id2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
pos_new[j] = pop[id1][self.ID_POS][j] - (pop[id2][self.ID_POS][j] - pop[i][self.ID_POS][j]) * uniform()
fit = self.get_fitness_position(pos_new)
if fit < pop[i][self.ID_FIT]:
pop[i] = [pos_new, fit]
n_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| true | true |
1c32de006462a0aee12320703b3287e24affe290 | 2,477 | py | Python | sdk/python/pulumi_azure_nextgen/kusto/v20190121/list_database_principals.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/kusto/v20190121/list_database_principals.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/kusto/v20190121/list_database_principals.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListDatabasePrincipalsResult',
'AwaitableListDatabasePrincipalsResult',
'list_database_principals',
]
@pulumi.output_type
class ListDatabasePrincipalsResult:
    """
    The list Kusto database principals operation response.
    """
    def __init__(__self__, value=None):
        # `value` must be a list (of database principals) when provided.
        # Note: a falsy non-list (e.g. 0, "") passes this check unchanged —
        # generated-code behavior, kept as-is.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.DatabasePrincipalResponseResult']]:
        """
        The list of Kusto database principals.
        """
        return pulumi.get(self, "value")
class AwaitableListDatabasePrincipalsResult(ListDatabasePrincipalsResult):
    """Awaitable wrapper so the result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # is what the await protocol requires; the coroutine resolves
        # immediately with a plain ListDatabasePrincipalsResult.
        if False:
            yield self
        return ListDatabasePrincipalsResult(
            value=self.value)
def list_database_principals(cluster_name: Optional[str] = None,
                             database_name: Optional[str] = None,
                             resource_group_name: Optional[str] = None,
                             opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabasePrincipalsResult:
    """
    Return the principals of a Kusto database.

    :param str cluster_name: The name of the Kusto cluster.
    :param str database_name: The name of the database in the Kusto cluster.
    :param str resource_group_name: The name of the resource group containing the Kusto cluster.
    """
    invoke_args = {
        'clusterName': cluster_name,
        'databaseName': database_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # The invoke returns an output whose .value holds the raw result object.
    raw = pulumi.runtime.invoke('azure-nextgen:kusto/v20190121:listDatabasePrincipals', invoke_args, opts=opts, typ=ListDatabasePrincipalsResult).value
    return AwaitableListDatabasePrincipalsResult(
        value=raw.value)
| 35.898551 | 152 | 0.69237 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListDatabasePrincipalsResult',
'AwaitableListDatabasePrincipalsResult',
'list_database_principals',
]
@pulumi.output_type
class ListDatabasePrincipalsResult:
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.DatabasePrincipalResponseResult']]:
return pulumi.get(self, "value")
class AwaitableListDatabasePrincipalsResult(ListDatabasePrincipalsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabasePrincipalsResult(
value=self.value)
def list_database_principals(cluster_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabasePrincipalsResult:
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:kusto/v20190121:listDatabasePrincipals', __args__, opts=opts, typ=ListDatabasePrincipalsResult).value
return AwaitableListDatabasePrincipalsResult(
value=__ret__.value)
| true | true |
1c32de7addcb2901a9b6caafd9376cc644a4b77e | 3,229 | py | Python | PythonBaseDemo/GraphicInterfaceProgrammin/11.8/manipulate_tag.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | PythonBaseDemo/GraphicInterfaceProgrammin/11.8/manipulate_tag.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | PythonBaseDemo/GraphicInterfaceProgrammin/11.8/manipulate_tag.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
from tkinter import *
# Create the main window
root = Tk()
root.title('操作标签')
# Create a Canvas and add it to the window
cv = Canvas(root, background='white', width=620, height=250)
cv.pack(fill=BOTH, expand=YES)
# Draw a rectangle outline
rt = cv.create_rectangle(40, 40, 300, 220,
                         outline='blue', width=2,
                         tag = ('t1', 't2', 't3', 'tag4')) # attach tags to this item
# Print the item's id (its sequence number on the canvas)
print(rt) # 1
# Draw an oval
oval = cv.create_oval(350, 50, 580, 200,
                      fill='yellow', width=0,
                      tag = ('g1', 'g2', 'g3', 'tag4')) # attach tags to this item
# Print the item's id (its sequence number on the canvas)
print(oval) # 2
# Find all items carrying the given tag
print(cv.find_withtag('tag4')) # (1, 2)
# Get every tag of an item
print(cv.gettags(rt)) # ('t1', 't2', 't3', 'tag4')
print(cv.gettags(2)) # ('g1', 'g2', 'g3', 'tag4')
cv.dtag(1, 't1') # remove the tag 't1' from the item with id 1
cv.dtag(oval, 'g1') # remove the tag 'g1' from the oval item
# Get every tag of an item
print(cv.gettags(rt)) # ('tag4', 't2', 't3')
print(cv.gettags(2)) # ('tag4', 'g2', 'g3')
# Add a tag to every item on the canvas
cv.addtag_all('t5')
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5')
# Add a tag to the item(s) already tagged 'g2'
cv.addtag_withtag('t6', 'g2')
# Get every tag of an item
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6')
# Tag the item stacked above the one tagged 't2' (that is the oval)
cv.addtag_above('t7', 't2')
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6', 't7')
# Tag the item stacked below the one tagged 'g2' (that is the rectangle)
cv.addtag_below('t8', 'g2')
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5', 't8')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6', 't7')
# Tag the item closest to the point (360, 90) — that is the oval
cv.addtag_closest('t9', 360, 90)
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5', 't8')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6', 't7', 't9')
# Tag the topmost item in the region (covering nearly the whole canvas).
# NOTE(review): addtag_closest is called with four numbers, so 600/240 are
# taken as the halo/start arguments — addtag_enclosed was likely intended.
cv.addtag_closest('t10', 30, 30, 600, 240)
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5', 't8')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6', 't7', 't9', 't10')
# Tag the topmost item overlapping the region.
# NOTE(review): same issue as above — addtag_overlapping was likely intended.
cv.addtag_closest('t11', 250, 30, 400, 240)
print(cv.gettags(1)) # ('tag4', 't2', 't3', 't5', 't8')
print(cv.gettags(oval)) # ('tag4', 'g2', 'g3', 't5', 't6', 't7', 't9', 't10', 't11')
root.mainloop()
| 42.486842 | 85 | 0.453081 | true | true | |
1c32deff61557b7a2ba4518c3ad36f40f29a3a21 | 964 | py | Python | kubernetes/test/test_v1_replica_set_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | 3 | 2019-05-19T05:05:37.000Z | 2020-03-20T04:56:20.000Z | kubernetes/test/test_v1_replica_set_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_replica_set_list.py | Scalr/kubernetes-client-python | 07442bdb76f0876ec96c0b0da6f9c4b06d7e5e38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_replica_set_list import V1ReplicaSetList
class TestV1ReplicaSetList(unittest.TestCase):
    """Unit-test stubs for the V1ReplicaSetList model (generated skeleton)."""

    def setUp(self):
        """No fixtures are required for this stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testV1ReplicaSetList(self):
        """Placeholder test for V1ReplicaSetList construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_replica_set_list.V1ReplicaSetList()
        pass
# Allow running this test module directly: `python test_v1_replica_set_list.py`.
if __name__ == '__main__':
    unittest.main()
| 21.422222 | 105 | 0.709544 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_replica_set_list import V1ReplicaSetList
class TestV1ReplicaSetList(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1ReplicaSetList(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c32dfb7d010be6550570f6a03b005a2b2b34c94 | 536 | py | Python | apidocs/search_es.py | kleag/external-knowledge-codegen | 9b6562a549d4bad755fa99a3bee15521fac59e97 | [
"Apache-2.0"
] | null | null | null | apidocs/search_es.py | kleag/external-knowledge-codegen | 9b6562a549d4bad755fa99a3bee15521fac59e97 | [
"Apache-2.0"
] | null | null | null | apidocs/search_es.py | kleag/external-knowledge-codegen | 9b6562a549d4bad755fa99a3bee15521fac59e97 | [
"Apache-2.0"
] | null | null | null | import json
import pprint
import sys
from elasticsearch import Elasticsearch
def get_top_k(query, k=5, client=None):
    """Print the top *k* snippets matching *query* from the 'python-code' index.

    For each hit, prints its score, docstring (``intent``), code (``snippet``)
    and StackOverflow question id (``question_id``).

    :param query: free-text query passed to Elasticsearch as ``q``.
    :param k: maximum number of hits to print.
    :param client: optional Elasticsearch-like client. Defaults to the
        module-global ``es`` for backward compatibility (previously the
        function always used the global, which is only defined when the
        module runs as a script).
    """
    search_client = client if client is not None else es
    results = search_client.search(index='python-code', params={"q": query})['hits']['hits'][:k]
    for doc in results:
        print("Score: ", doc['_score'])
        print("Docstring: ", doc['_source']['intent'])
        print("Code: ", doc['_source']['snippet'])
        print("URL: ", doc['_source']['question_id'])
        print("\n\n")
if __name__ == '__main__':
    # Robustness fix: exit with a usage message instead of an IndexError
    # traceback when the query argument is missing.
    if len(sys.argv) < 2:
        sys.exit("usage: search_es.py <query>")
    es = Elasticsearch()
    query = sys.argv[1]
    get_top_k(query)
| 26.8 | 85 | 0.602612 | import json
import pprint
import sys
from elasticsearch import Elasticsearch
def get_top_k(query, k=5):
results = es.search(index='python-code', params={"q": query})['hits']['hits'][:k]
for doc in results:
print("Score: ", doc['_score'])
print("Docstring: ", doc['_source']['intent'])
print("Code: ", doc['_source']['snippet'])
print("URL: ", doc['_source']['question_id'])
print("\n\n")
if __name__ == '__main__':
es = Elasticsearch()
query = sys.argv[1]
get_top_k(query)
| true | true |
1c32e058405e630fce30abbd6463173e1df2acbe | 16,138 | py | Python | optimus/helpers/constants.py | liRONCO11/optimus | 0ca0567267300397c7ba711483c46f94ac265e55 | [
"Apache-2.0"
] | null | null | null | optimus/helpers/constants.py | liRONCO11/optimus | 0ca0567267300397c7ba711483c46f94ac265e55 | [
"Apache-2.0"
] | null | null | null | optimus/helpers/constants.py | liRONCO11/optimus | 0ca0567267300397c7ba711483c46f94ac265e55 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
from optimus.helpers.logger import logger
# Python to PySpark reference
#
# type(None): NullType,
# bool: BooleanType,
# int: LongType,
# float: DoubleType,
# str: StringType,
# bytearray: BinaryType,
# decimal.Decimal: DecimalType,
# datetime.date: DateType,
# datetime.datetime: TimestampType,
# datetime.time: TimestampType,
# Profiler
class Actions(Enum):
    """
    String identifiers for the operations that modify columns/rows.

    NOTE: several members share the same string value — the three REMOVE_*
    members all map to "remove", REPLACE_REGEX maps to "replace" and
    YEARS_BETWEEN maps to "apply_cols". Per Enum semantics those later
    members are *aliases* of the first member with that value, and aliases
    are skipped when iterating, so ``Actions.list()`` contains each distinct
    value exactly once.
    """
    # COLUMNS
    PROFILER_DTYPE = "profiler_dtype"
    MATCH = "match"
    LOWER = "lower"
    UPPER = "upper"
    PROPER = "proper"
    PAD = "pad"
    TRIM = "trim"
    REVERSE = "reverse"
    REMOVE_ACCENTS = "remove"
    REMOVE_SPECIAL_CHARS = "remove"
    REMOVE_WHITE_SPACES = "remove"
    LEFT = "left"
    RIGHT = "right"
    MID = "mid"
    REPLACE = "replace"
    REPLACE_REGEX = "replace"
    FILL_NA = "fill_na"
    CAST = "cast"
    IS_NA = "is_na"
    Z_SCORE = "z_score"
    NEST = "nest"
    UNNEST = "unnest"
    SET = "set"
    STRING_TO_INDEX = "string_to_index"
    DATE_FORMAT = "date_format"
    INDEX_TO_STRING = "index_to_string"
    MIN_MAX_SCALER = "min_max_scaler"
    MAX_ABS_SCALER = "max_abs_scaler"
    STANDARD_SCALER = "standard_scaler"
    APPLY_COLS = "apply_cols"
    YEARS_BETWEEN = "apply_cols"
    IMPUTE = "impute"
    EXTRACT = "extract"
    ABS = "abs"
    MATH = "math"
    VARIANCE = "variance"
    SLICE = "slice"
    CLIP = "clip"
    DROP = "drop"
    KEEP = "keep"
    CUT = "cut"
    TO_FLOAT = "to_float"
    TO_INTEGER = "to_integer"
    TO_BOOLEAN = "to_boolean"
    TO_STRING = "to_string"
    YEAR = "years"
    APPEND = "append"
    COPY = "copy"
    RENAME = "rename"
    UNIQUE = "unique"
    INFER = "infer"
    WORD_TOKENIZE = "word_tokenize"
    LENGTH = "length"
    GET = "get"
    ITEM = "item"
    # URL Example  http://search.somedb.com:8080/history?era=darkages
    # scheme    http
    # hostname  search.somedb.com
    # port      8080
    # origin    http://search.somedb.com:8080
    # path      /history
    # query     ?era=darkages
    DOMAIN = "domain"
    TOP_DOMAIN = "top_domain"
    SUB_DOMAIN = "sub_domain"
    URL_SCHEME = "url_scheme"
    URL_PATH = "url_path"
    URL_FILE = "url_file"
    URL_QUERY = "url_query"
    URL_FRAGMENT = "url_fragment"
    HOST = "host"
    PORT = "port"
    EMAIL_DOMAIN = "email_domain"
    EMAIL_USER = "email_user"
    FINGERPRINT = "fingerprint"
    NGRAM_FINGERPRINT = "ngram_fingerprint"
    NGRAMS = "ngrams"
    # PHONETIC ENCODING
    SOUNDEX = "soundex"
    METAPHONE = "metaphone"
    DOUBLE_METAPHONE = "double_metaphone"
    MATCH_RATING_CODEX = "match_rating_codex"
    NYSIIS = "nysiis"
    # ROWS
    SELECT_ROW = "select_row"
    DROP_ROW = "drop_row"
    BETWEEN_ROW = "between_drop"
    SORT_ROW = "sort_row"

    @staticmethod
    def list():
        """Return the distinct action values as plain strings (aliases collapse)."""
        return [action.value for action in Actions]
class ProfilerDataTypesQuality(Enum):
    # Quality buckets used when profiling values against an inferred dtype:
    # 0 = mismatch, 1 = missing, 2 = match. (Names are the ground truth; the
    # numeric ordering presumably allows ranking — confirm with callers.)
    MISMATCH = 0
    MISSING = 1
    MATCH = 2
class ProfilerDataTypes(Enum):
    """Labels the profiler uses for inferred column data types."""
    INT = "int"
    DECIMAL = "decimal"
    STRING = "str"
    BOOLEAN = "boolean"
    DATETIME = "datetime"
    ARRAY = "array"
    OBJECT = "object"
    GENDER = "gender"
    IP = "ip"
    URL = "url"
    EMAIL = "email"
    CREDIT_CARD_NUMBER = "credit_card_number"
    ZIP_CODE = "zip_code"
    MISSING = "missing"
    CATEGORICAL = "categorical"
    PHONE_NUMBER = "phone_number"
    SOCIAL_SECURITY_NUMBER = "social_security_number"
    HTTP_CODE = "http_code"
    US_STATE = "us_state"
    NULL = "null"

    @staticmethod
    def list():
        """Return every dtype label as a plain list of strings."""
        return [dtype.value for dtype in ProfilerDataTypes]
# NULL = "null"
# MISSING = "missing"
class Schemas(Enum):
    """URI scheme prefixes for the supported local/remote storage backends."""
    S3 = 's3://'
    GCS = 'gcs://'
    GC = 'gc://'
    HTTP = 'http://'
    HTTPS = 'https://'
    FTP = 'ftp://'
    FILE = 'file://'
    AZ = 'az://'
    ADL = 'adl://'
    ABFS = 'abfs://'

    @staticmethod
    def list():
        """Return every scheme prefix as a plain list of strings."""
        return [scheme.value for scheme in Schemas]
# Which profiler dtype *values* (plain strings) count as numeric vs string-like.
PROFILER_NUMERIC_DTYPES = [ProfilerDataTypes.INT.value, ProfilerDataTypes.DECIMAL.value]
# BUGFIX: PHONE_NUMBER previously lacked `.value`, putting an Enum member
# into an otherwise all-string list.
# NOTE(review): MISSING and CATEGORICAL are intentionally(?) absent — confirm.
PROFILER_STRING_DTYPES = [ProfilerDataTypes.STRING.value, ProfilerDataTypes.BOOLEAN.value,
                          ProfilerDataTypes.DATETIME.value, ProfilerDataTypes.ARRAY.value,
                          ProfilerDataTypes.OBJECT.value, ProfilerDataTypes.GENDER.value,
                          ProfilerDataTypes.IP.value, ProfilerDataTypes.URL.value,
                          ProfilerDataTypes.EMAIL.value, ProfilerDataTypes.CREDIT_CARD_NUMBER.value,
                          ProfilerDataTypes.ZIP_CODE.value, ProfilerDataTypes.PHONE_NUMBER.value,
                          ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value,
                          ProfilerDataTypes.HTTP_CODE.value, ProfilerDataTypes.US_STATE.value]

# Strings and Function Messages
JUST_CHECKING = "Just check that all necessary environments vars are present..."
STARTING_OPTIMUS = "Transform and Roll out..."
SUCCESS = "Optimus successfully imported. Have fun :)."

# (confidence %, z-score) pairs; note this is a *tuple of lists*, not a dict.
CONFIDENCE_LEVEL_CONSTANT = [50, .67], [68, .99], [90, 1.64], [95, 1.96], [99, 2.57]
def print_check_point_config(filesystem):
logger.print(
"Setting checkpoint folder %s. If you are in a cluster initialize Optimus with master='your_ip' as param",
filesystem)
# For Google Colab
JAVA_PATH_COLAB = "/usr/lib/jvm/java-8-openjdk-amd64"
RELATIVE_ERROR = 10000
# Buffer size in rows
BUFFER_SIZE = 500000
US_STATES_NAMES = ["alabama",
"alaska",
"american samoa",
"arizona",
"arkansas",
"california",
"colorado",
"connecticut",
"delaware",
"district of columbia",
"federated states of micronesia",
"florida",
"georgia",
"guam",
"hawaii",
"idaho",
"illinois",
"indiana",
"iowa",
"kansas",
"kentucky",
"louisiana",
"maine",
"marshall islands",
"maryland",
"massachusetts",
"michigan",
"minnesota",
"mississippi",
"missouri",
"montana",
"nebraska",
"nevada",
"new hampshire",
"new jersey",
"new mexico",
"new york",
"north carolina",
"north dakota",
"northern mariana islands",
"ohio",
"oklahoma",
"oregon",
"palau",
"pennsylvania",
"puerto rico",
"rhode island",
"south carolina",
"south dakota",
"tennessee",
"texas",
"utah",
"vermont",
"virgin islands",
"virginia",
"washington",
"west virginia",
"wisconsin",
"wyoming"
]
US_STATES_CODE = [
"al",
"ak",
"as",
"az",
"ar",
"ca",
"co",
"ct",
"de",
"dc",
"fm",
"fl",
"ga",
"gu",
"hi",
"id",
"il",
"in",
"ia",
"ks",
"ky",
"la",
"me",
"mh",
"md",
"ma",
"mi",
"mn",
"ms",
"mo",
"mt",
"ne",
"nv",
"nh",
"nj",
"nm",
"ny",
"nc",
"nd",
"mp",
"oh",
"ok",
"or",
"pw",
"pa",
"pr",
"ri",
"sc",
"sd",
"tn",
"tx",
"ut",
"vt",
"vi",
"va",
"wa",
"wv",
"wi",
"wy"
]
CURRENCIES = {"$": "dollar",
"¢": "cent",
"£": "point",
"€": "euro",
"¥": "yen",
"₹": "indian rupee",
"₽": "ruble",
"元": "yuan",
"¤": "currency",
"₠": "euro-currency",
"₡": "colon",
"₢": "cruzeiro",
"₣": "french franc",
"₤": "lira",
"₥": "mill",
"₦": "naira",
"₧": "peseta",
"₨": "rupee",
"₩": "won",
"₪": "new shequel",
"₫": "dong",
"₭": "kip",
"₮": "tugrik",
"₯": "drachma",
"₰": "german penny",
"₱": "peso",
"₲": "guarani",
"₳": "austral",
"₴": "hryvnia",
"₵": "cedi",
"₶": "livre tournois",
"₸": "tenge",
"₺": "turkish lira",
"₼": "manat",
"৲": "bengali rupee mark",
"৳": "bengali rupee sign",
"૱": "gujarati rupee sign",
"௹": "tamil rupee sign",
"฿": "thai currency bath",
"៛": "khmer currency reil",
"㍐": "square yuan",
"円": "yen character",
"圆": "yen/yuan character variant one",
"圎": "yen/yuan character variant two",
"圓": "yuan character, in hong kong and taiwan",
"圜": "yen/yuan character variant three",
"원": "won character",
"﷼": "rial sign",
"$": "fullwidth dollar sign",
"¢": "fullwidth cent sign",
"£": "fullwidth pound sign",
"¥": "fullwidth yen sign",
"₩": "fullwidth won sign"}
PYTHON_SHORT_TYPES = {"string": "string",
"str": "string",
"integer": "int",
"int": "int",
"float": "float",
"double": "double",
"bool": "boolean",
"boolean": "boolean",
"array": "array",
"null": "null"
}
PYTHON_TYPES = {"string": str, "int": int, "float": float, "boolean": bool}
PROFILER_COLUMN_TYPES = {"categorical", "numeric", "date", "null", "array", "binary"}
PYTHON_TO_PROFILER = {"string": "categorical", "boolean": "categorical", "int": "numeric", "float": "numeric",
"decimal": "numeric", "date": "date", "array": "array", "binary": "binary", "null": "null"}
PROFILER_CATEGORICAL_DTYPES = [ProfilerDataTypes.BOOLEAN.value, ProfilerDataTypes.ZIP_CODE.value,
ProfilerDataTypes.STRING.value, ProfilerDataTypes.HTTP_CODE.value,
ProfilerDataTypes.INT.value, ProfilerDataTypes.IP.value, ProfilerDataTypes.GENDER.value,
ProfilerDataTypes.PHONE_NUMBER.value, ProfilerDataTypes.US_STATE.value,
ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value
]
CONTRACTIONS = [
("a'ight", "alright"),
("ain't", "am not"),
("amn't", "am not"),
("arencha", "are not you"),
("aren't", "are not"),
("'bout", "about"),
("cannot", "can not"),
("can't", "cannot"),
("cap'n", "captain"),
("'cause", "because"),
("'cept", "except"),
("could've", "could have"),
("couldn't", "could not"),
("couldn't've", "could not have"),
("dammit", "damn it"),
("daren't", "dare not"),
("daresn't", "dare not"),
("dasn't", "dare not"),
("didn't", "did not"),
("doesn't", "does not"),
("don't", "do not "),
("dunno", "do not know"),
("d'ye", "do you "),
("e'en", "even"),
("e'er", "ever"),
("'em", "them"),
("everybody's", "everybody is"),
("everyone's", "everyone is"),
("fo'c'sle", "forecastle"),
("'gainst", "against"),
("g'day", "good day"),
("gimme", "give me"),
("giv'n", "given"),
("gonna", "going to"),
("gon't", "go not "),
("gotta", "got to"),
("hadn't", "had not"),
("had've", "had have"),
("hasn't", "has not"),
("haven't", "have not"),
("he'd", "he would"),
("he'll", "he will"),
("helluva", "hell of a"),
("he's", "he is"),
("here's", "here is"),
("he've", "he have"),
("how'd", "how would"),
("howdy", "how do you do "),
("how'll", "how will"),
("how're", "how are"),
("how's", "how is "),
("I'd", "I would"),
("I'd've", "I would have"),
("I'll", "I shall / I will"),
("I'm", "I am"),
("I'm'a", "I am about to"),
("I'm'o", "I am going to"),
("innit", "is it not"),
("I've", "I have"),
("isn't", "is not"),
("it'd", "it would"),
("it'll", "it will"),
("it's", "it is"),
("iunno", "I don't know"),
("kinda", "kind of"),
("let's", "let us"),
("ma'am", "madam"),
("mayn't", "may not"),
("may've", "may have"),
("methinks", "me thinks"),
("mightn't", "might not"),
("might've", "might have"),
("mustn't", "must not"),
("mustn't've", "must not have"),
("must've", "must have"),
("'neath", "beneath"),
("needn't", "need not"),
("nal", "and all"),
("ne'er", "never"),
("o'clock", "of the clock"),
("o'er", "over"),
("ol'", "old"),
("oughtn't", "ought not"),
("'round", "around"),
("'s", "is"),
("shalln't", "shall not"),
("shan't", "shall not"),
("she'd", "she would"),
("she'll", "she will"),
("she's", "she is"),
("should've", "should have"),
("shouldn't", "should not"),
("shouldn't've", "should not have"),
("somebody's", "somebody is"),
("someone's", "someone is"),
("something's", "something is"),
("so're", "so are"),
("so's", "so is"),
("so've", "so have"),
("that'll", "that will"),
("that're", "that are"),
("that's", "that is"),
("that'd", "that would "),
("there'd", "there would"),
("there'll", "there will"),
("there're", "there are"),
("there's", "there is"),
("these're", "these are"),
("these've", "these have"),
("they'd", "they would"),
("they'll", "they will"),
("they're", "they are "),
("they've", "they have"),
("this's", "this is"),
("those're", "those are"),
("those've", "those have"),
("'thout", "without"),
("'til", "until"),
("'tis", "it is"),
("to've", "to have"),
("'twas", "it was"),
("'tween", "between"),
("'twere", "it were"),
("wanna", "want to"),
("wasn't", "was not"),
("we'd", "we would"),
("we'd've", "we would have"),
("we'll", "we will"),
("we're", "we are"),
("we've", "we have"),
("weren't", "were not"),
("whatcha", "what are you"),
("what'd", "what did"),
("what'll", "what will"),
("what're", "what are"),
("what's", "what is"),
("what've", "what have"),
("when's", "when is"),
("where'd", "where did"),
("where'll", "where will"),
("where're", "where are"),
("where's", "where is "),
("where've", "where have"),
("which'd", "which would"),
("which'll", "which will"),
("which're", "which are"),
("which's", "which is"),
("which've", "which have"),
("who'd", "who would "),
("who'd've", "who would have"),
("who'll", "who will"),
("who're", "who are"),
("who's", "who is "),
("who've", "who have"),
("why'd", "why did"),
("why're", "why are"),
("why's", "why is "),
("willn't", "will not"),
("won't", "will not"),
("wonnot", "will not "),
("would've", "would have"),
("wouldn't", "would not"),
("wouldn't've", "would not have"),
("y'all", "you all"),
("y'all'd've", "you all would have"),
("y'all'd'n've", "you all would not have "),
("y'all're", "you all are "),
("y'at", "you at"),
("yes'm", "yes madam"),
("yes ma'am", "yes madam"),
("yessir", "yes sir"),
("you'd", "you would"),
("you'll", "you will"),
("you're", "you are"),
("you've", "you have")
]
| 27.680961 | 119 | 0.468212 | from enum import Enum
from optimus.helpers.logger import logger
class Actions(Enum):
PROFILER_DTYPE = "profiler_dtype"
MATCH = "match"
LOWER = "lower"
UPPER = "upper"
PROPER = "proper"
PAD = "pad"
TRIM = "trim"
REVERSE = "reverse"
REMOVE_ACCENTS = "remove"
REMOVE_SPECIAL_CHARS = "remove"
REMOVE_WHITE_SPACES = "remove"
LEFT = "left"
RIGHT = "right"
MID = "mid"
REPLACE = "replace"
REPLACE_REGEX = "replace"
FILL_NA = "fill_na"
CAST = "cast"
IS_NA = "is_na"
Z_SCORE = "z_score"
NEST = "nest"
UNNEST = "unnest"
SET = "set"
STRING_TO_INDEX = "string_to_index"
DATE_FORMAT = "date_format"
INDEX_TO_STRING = "index_to_string"
MIN_MAX_SCALER = "min_max_scaler"
MAX_ABS_SCALER = "max_abs_scaler"
STANDARD_SCALER = "standard_scaler"
APPLY_COLS = "apply_cols"
YEARS_BETWEEN = "apply_cols"
IMPUTE = "impute"
EXTRACT = "extract"
ABS = "abs"
MATH = "math"
VARIANCE = "variance"
SLICE = "slice"
CLIP = "clip"
DROP = "drop"
KEEP = "keep"
CUT = "cut"
TO_FLOAT = "to_float"
TO_INTEGER = "to_integer"
TO_BOOLEAN = "to_boolean"
TO_STRING = "to_string"
YEAR = "years"
APPEND = "append"
COPY = "copy"
RENAME = "rename"
UNIQUE = "unique"
INFER = "infer"
WORD_TOKENIZE = "word_tokenize"
LENGTH = "length"
GET = "get"
ITEM = "item"
DOMAIN = "domain"
TOP_DOMAIN = "top_domain"
SUB_DOMAIN = "sub_domain"
URL_SCHEME = "url_scheme"
URL_PATH = "url_path"
URL_FILE = "url_file"
URL_QUERY = "url_query"
URL_FRAGMENT = "url_fragment"
HOST = "host"
PORT = "port"
EMAIL_DOMAIN = "email_domain"
EMAIL_USER = "email_user"
FINGERPRINT = "fingerprint"
NGRAM_FINGERPRINT = "ngram_fingerprint"
NGRAMS = "ngrams"
SOUNDEX = "soundex"
METAPHONE = "metaphone"
DOUBLE_METAPHONE = "double_metaphone"
MATCH_RATING_CODEX = "match_rating_codex"
NYSIIS = "nysiis"
SELECT_ROW = "select_row"
DROP_ROW = "drop_row"
BETWEEN_ROW = "between_drop"
SORT_ROW = "sort_row"
@staticmethod
def list():
return list(map(lambda c: c.value, Actions))
class ProfilerDataTypesQuality(Enum):
MISMATCH = 0
MISSING = 1
MATCH = 2
class ProfilerDataTypes(Enum):
INT = "int"
DECIMAL = "decimal"
STRING = "str"
BOOLEAN = "boolean"
DATETIME = "datetime"
ARRAY = "array"
OBJECT = "object"
GENDER = "gender"
IP = "ip"
URL = "url"
EMAIL = "email"
CREDIT_CARD_NUMBER = "credit_card_number"
ZIP_CODE = "zip_code"
MISSING = "missing"
CATEGORICAL = "categorical"
PHONE_NUMBER = "phone_number"
SOCIAL_SECURITY_NUMBER = "social_security_number"
HTTP_CODE = "http_code"
US_STATE = "us_state"
NULL = "null"
@staticmethod
def list():
return list(map(lambda c: c.value, ProfilerDataTypes))
class Schemas(Enum):
S3 = 's3://'
GCS = 'gcs://'
GC = 'gc://'
HTTP = 'http://'
HTTPS = 'https://'
FTP = 'ftp://'
FILE = 'file://'
AZ = 'az://'
ADL = 'adl://'
ABFS = 'abfs://'
@staticmethod
def list():
return list(map(lambda c: c.value, Schemas))
PROFILER_NUMERIC_DTYPES = [ProfilerDataTypes.INT.value, ProfilerDataTypes.DECIMAL.value]
PROFILER_STRING_DTYPES = [ProfilerDataTypes.STRING.value, ProfilerDataTypes.BOOLEAN.value,
ProfilerDataTypes.DATETIME.value, ProfilerDataTypes.ARRAY.value,
ProfilerDataTypes.OBJECT.value, ProfilerDataTypes.GENDER.value,
ProfilerDataTypes.IP.value, ProfilerDataTypes.URL.value,
ProfilerDataTypes.EMAIL.value, ProfilerDataTypes.CREDIT_CARD_NUMBER.value,
ProfilerDataTypes.ZIP_CODE.value, ProfilerDataTypes.PHONE_NUMBER,
ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value,
ProfilerDataTypes.HTTP_CODE.value, ProfilerDataTypes.US_STATE.value]
JUST_CHECKING = "Just check that all necessary environments vars are present..."
STARTING_OPTIMUS = "Transform and Roll out..."
SUCCESS = "Optimus successfully imported. Have fun :)."
CONFIDENCE_LEVEL_CONSTANT = [50, .67], [68, .99], [90, 1.64], [95, 1.96], [99, 2.57]
def print_check_point_config(filesystem):
logger.print(
"Setting checkpoint folder %s. If you are in a cluster initialize Optimus with master='your_ip' as param",
filesystem)
JAVA_PATH_COLAB = "/usr/lib/jvm/java-8-openjdk-amd64"
RELATIVE_ERROR = 10000
BUFFER_SIZE = 500000
US_STATES_NAMES = ["alabama",
"alaska",
"american samoa",
"arizona",
"arkansas",
"california",
"colorado",
"connecticut",
"delaware",
"district of columbia",
"federated states of micronesia",
"florida",
"georgia",
"guam",
"hawaii",
"idaho",
"illinois",
"indiana",
"iowa",
"kansas",
"kentucky",
"louisiana",
"maine",
"marshall islands",
"maryland",
"massachusetts",
"michigan",
"minnesota",
"mississippi",
"missouri",
"montana",
"nebraska",
"nevada",
"new hampshire",
"new jersey",
"new mexico",
"new york",
"north carolina",
"north dakota",
"northern mariana islands",
"ohio",
"oklahoma",
"oregon",
"palau",
"pennsylvania",
"puerto rico",
"rhode island",
"south carolina",
"south dakota",
"tennessee",
"texas",
"utah",
"vermont",
"virgin islands",
"virginia",
"washington",
"west virginia",
"wisconsin",
"wyoming"
]
US_STATES_CODE = [
"al",
"ak",
"as",
"az",
"ar",
"ca",
"co",
"ct",
"de",
"dc",
"fm",
"fl",
"ga",
"gu",
"hi",
"id",
"il",
"in",
"ia",
"ks",
"ky",
"la",
"me",
"mh",
"md",
"ma",
"mi",
"mn",
"ms",
"mo",
"mt",
"ne",
"nv",
"nh",
"nj",
"nm",
"ny",
"nc",
"nd",
"mp",
"oh",
"ok",
"or",
"pw",
"pa",
"pr",
"ri",
"sc",
"sd",
"tn",
"tx",
"ut",
"vt",
"vi",
"va",
"wa",
"wv",
"wi",
"wy"
]
CURRENCIES = {"$": "dollar",
"¢": "cent",
"£": "point",
"€": "euro",
"¥": "yen",
"₹": "indian rupee",
"₽": "ruble",
"元": "yuan",
"¤": "currency",
"₠": "euro-currency",
"₡": "colon",
"₢": "cruzeiro",
"₣": "french franc",
"₤": "lira",
"₥": "mill",
"₦": "naira",
"₧": "peseta",
"₨": "rupee",
"₩": "won",
"₪": "new shequel",
"₫": "dong",
"₭": "kip",
"₮": "tugrik",
"₯": "drachma",
"₰": "german penny",
"₱": "peso",
"₲": "guarani",
"₳": "austral",
"₴": "hryvnia",
"₵": "cedi",
"₶": "livre tournois",
"₸": "tenge",
"₺": "turkish lira",
"₼": "manat",
"৲": "bengali rupee mark",
"৳": "bengali rupee sign",
"૱": "gujarati rupee sign",
"௹": "tamil rupee sign",
"฿": "thai currency bath",
"៛": "khmer currency reil",
"㍐": "square yuan",
"円": "yen character",
"圆": "yen/yuan character variant one",
"圎": "yen/yuan character variant two",
"圓": "yuan character, in hong kong and taiwan",
"圜": "yen/yuan character variant three",
"원": "won character",
"﷼": "rial sign",
"$": "fullwidth dollar sign",
"¢": "fullwidth cent sign",
"£": "fullwidth pound sign",
"¥": "fullwidth yen sign",
"₩": "fullwidth won sign"}
PYTHON_SHORT_TYPES = {"string": "string",
"str": "string",
"integer": "int",
"int": "int",
"float": "float",
"double": "double",
"bool": "boolean",
"boolean": "boolean",
"array": "array",
"null": "null"
}
PYTHON_TYPES = {"string": str, "int": int, "float": float, "boolean": bool}
PROFILER_COLUMN_TYPES = {"categorical", "numeric", "date", "null", "array", "binary"}
PYTHON_TO_PROFILER = {"string": "categorical", "boolean": "categorical", "int": "numeric", "float": "numeric",
"decimal": "numeric", "date": "date", "array": "array", "binary": "binary", "null": "null"}
PROFILER_CATEGORICAL_DTYPES = [ProfilerDataTypes.BOOLEAN.value, ProfilerDataTypes.ZIP_CODE.value,
ProfilerDataTypes.STRING.value, ProfilerDataTypes.HTTP_CODE.value,
ProfilerDataTypes.INT.value, ProfilerDataTypes.IP.value, ProfilerDataTypes.GENDER.value,
ProfilerDataTypes.PHONE_NUMBER.value, ProfilerDataTypes.US_STATE.value,
ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value
]
CONTRACTIONS = [
("a'ight", "alright"),
("ain't", "am not"),
("amn't", "am not"),
("arencha", "are not you"),
("aren't", "are not"),
("'bout", "about"),
("cannot", "can not"),
("can't", "cannot"),
("cap'n", "captain"),
("'cause", "because"),
("'cept", "except"),
("could've", "could have"),
("couldn't", "could not"),
("couldn't've", "could not have"),
("dammit", "damn it"),
("daren't", "dare not"),
("daresn't", "dare not"),
("dasn't", "dare not"),
("didn't", "did not"),
("doesn't", "does not"),
("don't", "do not "),
("dunno", "do not know"),
("d'ye", "do you "),
("e'en", "even"),
("e'er", "ever"),
("'em", "them"),
("everybody's", "everybody is"),
("everyone's", "everyone is"),
("fo'c'sle", "forecastle"),
("'gainst", "against"),
("g'day", "good day"),
("gimme", "give me"),
("giv'n", "given"),
("gonna", "going to"),
("gon't", "go not "),
("gotta", "got to"),
("hadn't", "had not"),
("had've", "had have"),
("hasn't", "has not"),
("haven't", "have not"),
("he'd", "he would"),
("he'll", "he will"),
("helluva", "hell of a"),
("he's", "he is"),
("here's", "here is"),
("he've", "he have"),
("how'd", "how would"),
("howdy", "how do you do "),
("how'll", "how will"),
("how're", "how are"),
("how's", "how is "),
("I'd", "I would"),
("I'd've", "I would have"),
("I'll", "I shall / I will"),
("I'm", "I am"),
("I'm'a", "I am about to"),
("I'm'o", "I am going to"),
("innit", "is it not"),
("I've", "I have"),
("isn't", "is not"),
("it'd", "it would"),
("it'll", "it will"),
("it's", "it is"),
("iunno", "I don't know"),
("kinda", "kind of"),
("let's", "let us"),
("ma'am", "madam"),
("mayn't", "may not"),
("may've", "may have"),
("methinks", "me thinks"),
("mightn't", "might not"),
("might've", "might have"),
("mustn't", "must not"),
("mustn't've", "must not have"),
("must've", "must have"),
("'neath", "beneath"),
("needn't", "need not"),
("nal", "and all"),
("ne'er", "never"),
("o'clock", "of the clock"),
("o'er", "over"),
("ol'", "old"),
("oughtn't", "ought not"),
("'round", "around"),
("'s", "is"),
("shalln't", "shall not"),
("shan't", "shall not"),
("she'd", "she would"),
("she'll", "she will"),
("she's", "she is"),
("should've", "should have"),
("shouldn't", "should not"),
("shouldn't've", "should not have"),
("somebody's", "somebody is"),
("someone's", "someone is"),
("something's", "something is"),
("so're", "so are"),
("so's", "so is"),
("so've", "so have"),
("that'll", "that will"),
("that're", "that are"),
("that's", "that is"),
("that'd", "that would "),
("there'd", "there would"),
("there'll", "there will"),
("there're", "there are"),
("there's", "there is"),
("these're", "these are"),
("these've", "these have"),
("they'd", "they would"),
("they'll", "they will"),
("they're", "they are "),
("they've", "they have"),
("this's", "this is"),
("those're", "those are"),
("those've", "those have"),
("'thout", "without"),
("'til", "until"),
("'tis", "it is"),
("to've", "to have"),
("'twas", "it was"),
("'tween", "between"),
("'twere", "it were"),
("wanna", "want to"),
("wasn't", "was not"),
("we'd", "we would"),
("we'd've", "we would have"),
("we'll", "we will"),
("we're", "we are"),
("we've", "we have"),
("weren't", "were not"),
("whatcha", "what are you"),
("what'd", "what did"),
("what'll", "what will"),
("what're", "what are"),
("what's", "what is"),
("what've", "what have"),
("when's", "when is"),
("where'd", "where did"),
("where'll", "where will"),
("where're", "where are"),
("where's", "where is "),
("where've", "where have"),
("which'd", "which would"),
("which'll", "which will"),
("which're", "which are"),
("which's", "which is"),
("which've", "which have"),
("who'd", "who would "),
("who'd've", "who would have"),
("who'll", "who will"),
("who're", "who are"),
("who's", "who is "),
("who've", "who have"),
("why'd", "why did"),
("why're", "why are"),
("why's", "why is "),
("willn't", "will not"),
("won't", "will not"),
("wonnot", "will not "),
("would've", "would have"),
("wouldn't", "would not"),
("wouldn't've", "would not have"),
("y'all", "you all"),
("y'all'd've", "you all would have"),
("y'all'd'n've", "you all would not have "),
("y'all're", "you all are "),
("y'at", "you at"),
("yes'm", "yes madam"),
("yes ma'am", "yes madam"),
("yessir", "yes sir"),
("you'd", "you would"),
("you'll", "you will"),
("you're", "you are"),
("you've", "you have")
]
| true | true |
1c32e11606263dcf39250cd485f75133e0df204e | 931 | py | Python | surveyapp/app/app/database.py | danielSilva21/interlinker-service-augmenter | 54b6ab3b9f2c21fd4779a03839842534544b9f9b | [
"MIT"
] | null | null | null | surveyapp/app/app/database.py | danielSilva21/interlinker-service-augmenter | 54b6ab3b9f2c21fd4779a03839842534544b9f9b | [
"MIT"
] | null | null | null | surveyapp/app/app/database.py | danielSilva21/interlinker-service-augmenter | 54b6ab3b9f2c21fd4779a03839842534544b9f9b | [
"MIT"
] | null | null | null | import logging
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection
from app.config import settings
import os
MAX_CONNECTIONS_COUNT = int(os.getenv("MAX_CONNECTIONS_COUNT", 10))
MIN_CONNECTIONS_COUNT = int(os.getenv("MIN_CONNECTIONS_COUNT", 10))
class DataBase:
client: AsyncIOMotorClient = None
db = DataBase()
async def get_collection() -> AsyncIOMotorCollection:
return db.client[settings.MONGODB_DATABASE][settings.COLLECTION_NAME]
async def connect_to_mongo():
logging.info("Connecting to database...")
db.client = AsyncIOMotorClient(settings.MONGODB_URL,
maxPoolSize=MAX_CONNECTIONS_COUNT,
minPoolSize=MIN_CONNECTIONS_COUNT)
logging.info("Database connected!")
async def close_mongo_connection():
logging.info("Closing database connection...")
db.client.close()
logging.info("Database closed!") | 33.25 | 74 | 0.728249 | import logging
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection
from app.config import settings
import os
MAX_CONNECTIONS_COUNT = int(os.getenv("MAX_CONNECTIONS_COUNT", 10))
MIN_CONNECTIONS_COUNT = int(os.getenv("MIN_CONNECTIONS_COUNT", 10))
class DataBase:
client: AsyncIOMotorClient = None
db = DataBase()
async def get_collection() -> AsyncIOMotorCollection:
return db.client[settings.MONGODB_DATABASE][settings.COLLECTION_NAME]
async def connect_to_mongo():
logging.info("Connecting to database...")
db.client = AsyncIOMotorClient(settings.MONGODB_URL,
maxPoolSize=MAX_CONNECTIONS_COUNT,
minPoolSize=MIN_CONNECTIONS_COUNT)
logging.info("Database connected!")
async def close_mongo_connection():
logging.info("Closing database connection...")
db.client.close()
logging.info("Database closed!") | true | true |
1c32e27a764119e0c51f69ff7e5bfdd210ef0fc7 | 4,886 | py | Python | rlpyt/samplers/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | 1 | 2021-04-24T16:42:18.000Z | 2021-04-24T16:42:18.000Z | rlpyt/samplers/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | null | null | null | rlpyt/samplers/collectors.py | tristandeleu/rlpyt | 22eccb4e2b33d3c52947a27b6d300b575e36a3ea | [
"MIT"
] | null | null | null |
import numpy as np
from rlpyt.agents.base import AgentInputs
from rlpyt.utils.buffer import buffer_from_example, torchify_buffer, numpify_buffer
from rlpyt.utils.logging import logger
class BaseCollector:
"""Class that steps environments, possibly in worker process."""
def __init__(
self,
rank,
envs,
samples_np,
batch_T,
TrajInfoCls,
agent=None, # Present or not, depending on collector class.
sync=None,
step_buffer_np=None,
global_B=1,
env_ranks=None,
):
self.rank = rank
self.envs = envs
self.samples_np = samples_np
self.batch_T = batch_T
self.TrajInfoCls = TrajInfoCls
self.agent = agent
self.sync = sync
self.step_buffer_np = step_buffer_np
self.global_B = global_B
self.env_ranks = env_ranks
def start_envs(self):
"""e.g. calls reset() on every env."""
raise NotImplementedError
def start_agent(self):
"""In CPU-collectors, call ``agent.collector_initialize()`` e.g. to set up
vector epsilon-greedy, and reset the agent.
"""
if getattr(self, "agent", None) is not None: # Not in GPU collectors.
self.agent.collector_initialize(
global_B=self.global_B, # Args used e.g. for vector epsilon greedy.
env_ranks=self.env_ranks,
)
self.agent.reset()
self.agent.sample_mode(itr=0)
def collect_batch(self, agent_inputs, traj_infos):
"""Main data collection loop."""
raise NotImplementedError
def reset_if_needed(self, agent_inputs):
"""Reset agent and or env as needed, if doing between batches."""
pass
class BaseEvalCollector:
"""Collectors for offline agent evalution; not to record intermediate samples."""
def __init__(
self,
rank,
envs,
TrajInfoCls,
traj_infos_queue,
max_T,
agent=None,
sync=None,
step_buffer_np=None,
):
self.rank = rank
self.envs = envs
self.TrajInfoCls = TrajInfoCls
self.traj_infos_queue = traj_infos_queue
self.max_T = max_T
self.agent = agent
self.sync = sync
self.step_buffer_np = step_buffer_np
def collect_evaluation(self):
"""Run agent evaluation in environment and return completed trajectory
infos."""
raise NotImplementedError
class DecorrelatingStartCollector(BaseCollector):
"""Collector which can step all environments through a random number of random
actions during startup, to decorrelate the states in training batches.
"""
def start_envs(self, max_decorrelation_steps=0):
"""Calls ``reset()`` on every environment instance, then steps each
one through a random number of random actions, and returns the
resulting agent_inputs buffer (`observation`, `prev_action`,
`prev_reward`)."""
traj_infos = [self.TrajInfoCls() for _ in self.envs]
observations = list()
for env in self.envs:
observations.append(env.reset())
observation = buffer_from_example(observations[0], len(self.envs))
for b, obs in enumerate(observations):
observation[b] = obs # numpy array or namedarraytuple
prev_action = np.stack([env.action_space.null_value()
for env in self.envs])
prev_reward = np.zeros(len(self.envs), dtype="float32")
if self.rank == 0:
logger.log("Sampler decorrelating envs, max steps: "
f"{max_decorrelation_steps}")
if max_decorrelation_steps != 0:
for b, env in enumerate(self.envs):
n_steps = 1 + int(np.random.rand() * max_decorrelation_steps)
for _ in range(n_steps):
a = env.action_space.sample()
o, r, d, info = env.step(a)
traj_infos[b].step(o, a, r, d, None, info)
if getattr(info, "traj_done", d):
o = env.reset()
traj_infos[b] = self.TrajInfoCls()
if d:
a = env.action_space.null_value()
r = 0
observation[b] = o
prev_action[b] = a
prev_reward[b] = r
# For action-server samplers.
if hasattr(self, "step_buffer_np") and self.step_buffer_np is not None:
self.step_buffer_np.observation[:] = observation
self.step_buffer_np.action[:] = prev_action
self.step_buffer_np.reward[:] = prev_reward
return AgentInputs(observation, prev_action, prev_reward), traj_infos
| 36.192593 | 85 | 0.586779 |
import numpy as np
from rlpyt.agents.base import AgentInputs
from rlpyt.utils.buffer import buffer_from_example, torchify_buffer, numpify_buffer
from rlpyt.utils.logging import logger
class BaseCollector:
def __init__(
self,
rank,
envs,
samples_np,
batch_T,
TrajInfoCls,
agent=None,
sync=None,
step_buffer_np=None,
global_B=1,
env_ranks=None,
):
self.rank = rank
self.envs = envs
self.samples_np = samples_np
self.batch_T = batch_T
self.TrajInfoCls = TrajInfoCls
self.agent = agent
self.sync = sync
self.step_buffer_np = step_buffer_np
self.global_B = global_B
self.env_ranks = env_ranks
def start_envs(self):
raise NotImplementedError
def start_agent(self):
if getattr(self, "agent", None) is not None:
self.agent.collector_initialize(
global_B=self.global_B,
env_ranks=self.env_ranks,
)
self.agent.reset()
self.agent.sample_mode(itr=0)
def collect_batch(self, agent_inputs, traj_infos):
raise NotImplementedError
def reset_if_needed(self, agent_inputs):
pass
class BaseEvalCollector:
def __init__(
self,
rank,
envs,
TrajInfoCls,
traj_infos_queue,
max_T,
agent=None,
sync=None,
step_buffer_np=None,
):
self.rank = rank
self.envs = envs
self.TrajInfoCls = TrajInfoCls
self.traj_infos_queue = traj_infos_queue
self.max_T = max_T
self.agent = agent
self.sync = sync
self.step_buffer_np = step_buffer_np
def collect_evaluation(self):
raise NotImplementedError
class DecorrelatingStartCollector(BaseCollector):
def start_envs(self, max_decorrelation_steps=0):
traj_infos = [self.TrajInfoCls() for _ in self.envs]
observations = list()
for env in self.envs:
observations.append(env.reset())
observation = buffer_from_example(observations[0], len(self.envs))
for b, obs in enumerate(observations):
observation[b] = obs
prev_action = np.stack([env.action_space.null_value()
for env in self.envs])
prev_reward = np.zeros(len(self.envs), dtype="float32")
if self.rank == 0:
logger.log("Sampler decorrelating envs, max steps: "
f"{max_decorrelation_steps}")
if max_decorrelation_steps != 0:
for b, env in enumerate(self.envs):
n_steps = 1 + int(np.random.rand() * max_decorrelation_steps)
for _ in range(n_steps):
a = env.action_space.sample()
o, r, d, info = env.step(a)
traj_infos[b].step(o, a, r, d, None, info)
if getattr(info, "traj_done", d):
o = env.reset()
traj_infos[b] = self.TrajInfoCls()
if d:
a = env.action_space.null_value()
r = 0
observation[b] = o
prev_action[b] = a
prev_reward[b] = r
if hasattr(self, "step_buffer_np") and self.step_buffer_np is not None:
self.step_buffer_np.observation[:] = observation
self.step_buffer_np.action[:] = prev_action
self.step_buffer_np.reward[:] = prev_reward
return AgentInputs(observation, prev_action, prev_reward), traj_infos
| true | true |
1c32e4313ba1e38678b13425e96ab2875bc04622 | 1,760 | py | Python | graphrole/graph/interface/__init__.py | dkaslovsky/GraphRole | 5336a9c3c42f8f89565686ed44e321ce6cce1a69 | [
"MIT"
] | 60 | 2019-03-07T15:15:49.000Z | 2022-03-25T19:21:18.000Z | graphrole/graph/interface/__init__.py | dkaslovsky/GraphRole | 5336a9c3c42f8f89565686ed44e321ce6cce1a69 | [
"MIT"
] | 5 | 2020-08-24T08:51:40.000Z | 2021-12-08T06:50:47.000Z | graphrole/graph/interface/__init__.py | dkaslovsky/GraphRole | 5336a9c3c42f8f89565686ed44e321ce6cce1a69 | [
"MIT"
] | 17 | 2019-08-02T04:15:50.000Z | 2022-03-16T21:32:23.000Z | from typing import List, TypeVar
from graphrole.graph.interface.base import BaseGraphInterface
from graphrole.graph.interface.networkx import NetworkxInterface
# IgraphInterface should not be imported if igraph is not installed
try:
from graphrole.graph.interface.igraph import IgraphInterface
except ImportError:
pass
INTERFACES = {
'networkx': NetworkxInterface,
# lazy eval in case IgraphInterface was not imported
# pylint: disable=unnecessary-lambda
'igraph': lambda x: IgraphInterface(x),
}
# define types
GraphInterfaceType = TypeVar('GraphInterfaceType', bound=BaseGraphInterface)
GraphLibInstance = TypeVar('GraphLibInstance', *list(INTERFACES.keys()))
def get_supported_graph_libraries() -> List[str]:
"""
Return list of supported graph libraries
"""
return list(INTERFACES.keys())
# NOTE: There are many ways of determining the module/package/type
# of an object: inspect, type, isinstance. Here we access
# the __module__ property directly since isinstance returns
# a bool which will not facilitate a dict lookup, type
# does not support inheritance, and inspect.getmodule returns
# a module ojbect that is specific to the subclass. There is
# likely a better approach and this should be futher investigated.
def get_interface(G: GraphLibInstance) -> GraphInterfaceType:
"""
Return subclass of Graph initialized with G
:param G: graph object from a supported graph libraries
"""
try:
module = G.__module__
package = module.split('.')[0]
except (AttributeError, IndexError):
return None
try:
graph_interface = INTERFACES[package]
except KeyError:
return None
return graph_interface(G)
| 32.592593 | 76 | 0.725568 | from typing import List, TypeVar
from graphrole.graph.interface.base import BaseGraphInterface
from graphrole.graph.interface.networkx import NetworkxInterface
try:
from graphrole.graph.interface.igraph import IgraphInterface
except ImportError:
pass
INTERFACES = {
'networkx': NetworkxInterface,
'igraph': lambda x: IgraphInterface(x),
}
GraphInterfaceType = TypeVar('GraphInterfaceType', bound=BaseGraphInterface)
GraphLibInstance = TypeVar('GraphLibInstance', *list(INTERFACES.keys()))
def get_supported_graph_libraries() -> List[str]:
return list(INTERFACES.keys())
def get_interface(G: GraphLibInstance) -> GraphInterfaceType:
try:
module = G.__module__
package = module.split('.')[0]
except (AttributeError, IndexError):
return None
try:
graph_interface = INTERFACES[package]
except KeyError:
return None
return graph_interface(G)
| true | true |
1c32e5600b4003e9f85a4060cae7a7cc3ab0c06e | 43 | py | Python | Ejercicio2.0.py | acasasaez/Warm-up | 9b97ee8d7d4a1c6e0b577b392c5fd62d5e1727f3 | [
"Apache-2.0"
] | null | null | null | Ejercicio2.0.py | acasasaez/Warm-up | 9b97ee8d7d4a1c6e0b577b392c5fd62d5e1727f3 | [
"Apache-2.0"
] | null | null | null | Ejercicio2.0.py | acasasaez/Warm-up | 9b97ee8d7d4a1c6e0b577b392c5fd62d5e1727f3 | [
"Apache-2.0"
] | null | null | null | print (chr(27)+ "[0;32;40m" + "Hola Mundo") | 43 | 43 | 0.581395 | print (chr(27)+ "[0;32;40m" + "Hola Mundo") | true | true |
1c32e56a8b489e0337bcce9ebcb6a97161fef89f | 518 | py | Python | plotly/validators/layout/slider/_templateitemname.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/layout/slider/_templateitemname.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/layout/slider/_templateitemname.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``layout.slider.templateitemname`` (generated code)."""
    def __init__(
        self,
        plotly_name='templateitemname',
        parent_name='layout.slider',
        **kwargs
    ):
        # Defaults may be overridden via kwargs; anything left over is
        # forwarded to the base StringValidator.
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'arraydraw'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='templateitemname',
parent_name='layout.slider',
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'arraydraw'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| true | true |
1c32e6ea491d0cdacd28a51303b5f5cb694e4266 | 11,781 | py | Python | landsat/landsat.py | willemarcel/landsat-util | 5804331f4f3d757312931c2ede5714cf7407f60c | [
"CC0-1.0"
] | 1 | 2017-02-17T13:27:09.000Z | 2017-02-17T13:27:09.000Z | landsat/landsat.py | willemarcel/landsat-util | 5804331f4f3d757312931c2ede5714cf7407f60c | [
"CC0-1.0"
] | null | null | null | landsat/landsat.py | willemarcel/landsat-util | 5804331f4f3d757312931c2ede5714cf7407f60c | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# USGS Landsat Imagery Util
#
#
# Author: developmentseed
# Contributer: scisco, KAPPS-
#
# License: CC0 1.0 Universal
from __future__ import print_function
import sys
import subprocess
import argparse
import textwrap
import json
from dateutil.parser import parse
from gs_helper import GsHelper
from clipper_helper import Clipper
from search_helper import Search
from general_helper import reformat_date
from image_helper import Process
import settings
DESCRIPTION = """Landsat-util is a command line utility that makes it easy to
search, download, and process Landsat imagery.
Commands:
Search:
landsat.py search [-h] [-l LIMIT] [-s START] [-e END] [-c CLOUD]
[--onlysearch] [--imageprocess]
{pr,shapefile,country}
positional arguments:
{pr,shapefile,country}
Search commands
pr Activate paths and rows
shapefile Activate Shapefile
country Activate country
optional arguments:
-h, --help show this help message and exit
-l LIMIT, --limit LIMIT
Search return results limit default is 100
-s START, --start START
Start Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-e END, --end END End Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-c CLOUD, --cloud CLOUD
Maximum cloud percentage default is 20 perct
-d, --download Use this flag to download found images
--imageprocess If this flag is used, the images are downloaded
and process. Be cautious as it might take a
long time to both download and process large
batches of images
--pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
Download:
landsat download [-h] sceneID [sceneID ...]
positional arguments:
sceneID Provide Full sceneID, e.g. LC81660392014196LGN00
Process:
landsat.py process [-h] [--pansharpen] path
positional arguments:
path Path to the compressed image file
optional arguments:
--pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
"""
def args_options():
    """Build and return the argparse parser for the landsat CLI.

    Sub-commands: ``search`` (with its own ``pr``/``shapefile``/``country``
    sub-sub-commands), ``download`` and ``process``.
    """
    parser = argparse.ArgumentParser(prog='landsat',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=textwrap.dedent(DESCRIPTION))
    subparsers = parser.add_subparsers(help='Landsat Utility',
                                       dest='subs')
    # Search Logic
    parser_search = subparsers.add_parser('search',
                                          help='Search Landsat metdata')
    # Global search options
    parser_search.add_argument('-l', '--limit', default=100, type=int,
                               help='Search return results limit\n'
                               'default is 100')
    parser_search.add_argument('-s', '--start',
                               help='Start Date - Most formats are accepted '
                               'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('-e', '--end',
                               help='End Date - Most formats are accepted '
                               'e.g. Jun 12 2014 OR 06/12/2014')
    parser_search.add_argument('-c', '--cloud', type=float, default=20.0,
                               help='Maximum cloud percentage '
                               'default is 20 perct')
    parser_search.add_argument('-d', '--download', action='store_true',
                               help='Use this flag to download found images')
    parser_search.add_argument('--imageprocess', action='store_true',
                               help='If this flag is used, the images are '
                               'downloaded and process. Be cautious as it '
                               'might take a long time to both download and '
                               'process large batches of images')
    parser_search.add_argument('--pansharpen', action='store_true',
                               help='Whether to also pansharpen the process '
                               'image. Pan sharpening takes a long time')
    # Sub-sub-commands selecting how the search area is specified.
    search_subparsers = parser_search.add_subparsers(help='Search commands',
                                                     dest='search_subs')
    search_pr = search_subparsers.add_parser('pr',
                                             help="Activate paths and rows")
    search_pr.add_argument('paths_rows',
                           metavar='path_row',
                           type=int,
                           nargs="+",
                           help="Provide paths and rows")
    search_shapefile = search_subparsers.add_parser('shapefile',
                                                    help="Activate Shapefile")
    search_shapefile.add_argument('path',
                                  help="Path to shapefile")
    search_country = search_subparsers.add_parser('country',
                                                  help="Activate country")
    search_country.add_argument('name', help="Country name e.g. ARE")
    # 'download' sub-command: fetch scenes by full scene ID.
    parser_download = subparsers.add_parser('download',
                                            help='Download images from Google Storage')
    parser_download.add_argument('scenes',
                                 metavar='sceneID',
                                 nargs="+",
                                 help="Provide Full sceneID, e.g. "
                                 "LC81660392014196LGN00")
    # 'process' sub-command: run the image pipeline on a local archive.
    parser_process = subparsers.add_parser('process',
                                           help='Process Landsat imagery')
    parser_process.add_argument('path',
                                help='Path to the compressed image file')
    parser_process.add_argument('--pansharpen', action='store_true',
                                help='Whether to also pansharpen the process '
                                'image. Pan sharpening takes a long time')
    parser_process.add_argument('--ndvi', action='store_true',
                                help='Whether to create a NDVI from the image ')
    parser_process.add_argument('--noclouds', action='store_true',
                                help='Whether to remove clouds and cirrus from '
                                'the NDVI image. ')
    return parser
def main(args):
    """
    Main function - launches the program.

    Dispatches on the sub-command: 'process' runs the image pipeline on a
    local archive, 'search' queries the metadata API (optionally downloading
    and processing results) and 'download' fetches scenes from Google Storage.
    """
    if args:
        if args.subs == 'process':
            p = Process(args.path)
            if args.pansharpen:
                p.full_with_pansharpening()
            elif args.ndvi:
                if args.noclouds:
                    p.full(ndvi=True, no_clouds=True)
                else:
                    p.full(ndvi=True)
            else:
                p.full()
            exit("The output is stored at %s." % settings.PROCESSED_IMAGE)
        elif args.subs == 'search':
            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
            except TypeError:
                # Fix: corrected the typo in the original message ("You date").
                exit("Your date format is incorrect. Please try again!", 1)
            s = Search()
            # Fix: initialise result so a country search that yields no
            # paths/rows exits cleanly instead of raising NameError below.
            result = None
            if args.search_subs == 'pr':
                result = s.search(row_paths=args.paths_rows,
                                  limit=args.limit,
                                  start_date=args.start,
                                  end_date=args.end,
                                  cloud_max=args.cloud)
            elif args.search_subs == 'shapefile':
                clipper = Clipper()
                result = s.search(clipper.shapefile(args.path),
                                  limit=args.limit,
                                  start_date=args.start,
                                  end_date=args.end,
                                  cloud_max=args.cloud)
            elif args.search_subs == 'country':
                clipper = Clipper()
                prs = clipper.country(args.name)
                if prs:
                    result = s.search(prs,
                                      limit=args.limit,
                                      start_date=args.start,
                                      end_date=args.end,
                                      cloud_max=args.cloud)
            if result is None:
                exit('No paths/rows were found for your search', 1)
            try:
                if result['status'] == 'SUCCESS':
                    print('%s items were found' % result['total'])
                    if result['total'] > 100:
                        exit('Too many results. Please narrow your search')
                    else:
                        print(json.dumps(result, sort_keys=True, indent=4))
                        # Optionally download (and then process) the results.
                        if args.download:
                            gs = GsHelper()
                            print('Starting the download:')
                            for item in result['results']:
                                gs.single_download(row=item['row'],
                                                   path=item['path'],
                                                   name=item['sceneID'])
                            print("%s images were downloaded"
                                  % result['total_returned'])
                            if args.imageprocess:
                                for item in result['results']:
                                    p = Process('%s/%s.tar.bz' % (gs.zip_dir,
                                                                  item['sceneID']))
                                    if args.pansharpen:
                                        p.full_with_pansharpening()
                                    else:
                                        p.full()
                            else:
                                exit("The downloaded images are located here: %s" %
                                     gs.zip_dir)
                        else:
                            exit('Done!')
                elif result['status'] == 'error':
                    exit(result['message'])
            except KeyError:
                exit('Too Many API queries. You can only query DevSeed\'s '
                     'API 5 times per minute', 1)
        elif args.subs == 'download':
            gs = GsHelper()
            print('Starting the download:')
            for scene in args.scenes:
                gs.single_download(row=gs.extract_row_path(scene)[1],
                                   path=gs.extract_row_path(scene)[0],
                                   name=scene)
            exit("The downloaded images are located here: %s" % gs.zip_dir)
def exit(message, code=0):
    """Print *message* to stdout and terminate the process with status *code*.

    Intentionally shadows the ``exit`` builtin within this module.
    """
    print(message)
    raise SystemExit(code)
def package_installed(package):
    """Return True when *package* is found on the PATH (checked via ``which``)."""
    print("Checking if %s is installed on the system" % package)
    found = subprocess.call(["which", package]) == 0
    if not found:
        print("You have to install %s first!" % package)
        return False
    print("%s is installed" % package)
    return True
def __main__():
    """Parse the command line and dispatch to :func:`main`.

    ``parser`` is published as a module global, matching the original
    behaviour.
    """
    global parser
    parser = args_options()
    main(parser.parse_args())
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    __main__()
| 39.800676 | 87 | 0.482811 |
from __future__ import print_function
import sys
import subprocess
import argparse
import textwrap
import json
from dateutil.parser import parse
from gs_helper import GsHelper
from clipper_helper import Clipper
from search_helper import Search
from general_helper import reformat_date
from image_helper import Process
import settings
DESCRIPTION = """Landsat-util is a command line utility that makes it easy to
search, download, and process Landsat imagery.
Commands:
Search:
landsat.py search [-h] [-l LIMIT] [-s START] [-e END] [-c CLOUD]
[--onlysearch] [--imageprocess]
{pr,shapefile,country}
positional arguments:
{pr,shapefile,country}
Search commands
pr Activate paths and rows
shapefile Activate Shapefile
country Activate country
optional arguments:
-h, --help show this help message and exit
-l LIMIT, --limit LIMIT
Search return results limit default is 100
-s START, --start START
Start Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-e END, --end END End Date - Most formats are accepted e.g.
Jun 12 2014 OR 06/12/2014
-c CLOUD, --cloud CLOUD
Maximum cloud percentage default is 20 perct
-d, --download Use this flag to download found images
--imageprocess If this flag is used, the images are downloaded
and process. Be cautious as it might take a
long time to both download and process large
batches of images
--pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
Download:
landsat download [-h] sceneID [sceneID ...]
positional arguments:
sceneID Provide Full sceneID, e.g. LC81660392014196LGN00
Process:
landsat.py process [-h] [--pansharpen] path
positional arguments:
path Path to the compressed image file
optional arguments:
--pansharpen Whether to also pansharpen the process image.
Pansharpening takes a long time
"""
def args_options():
parser = argparse.ArgumentParser(prog='landsat',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
subparsers = parser.add_subparsers(help='Landsat Utility',
dest='subs')
parser_search = subparsers.add_parser('search',
help='Search Landsat metdata')
parser_search.add_argument('-l', '--limit', default=100, type=int,
help='Search return results limit\n'
'default is 100')
parser_search.add_argument('-s', '--start',
help='Start Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-e', '--end',
help='End Date - Most formats are accepted '
'e.g. Jun 12 2014 OR 06/12/2014')
parser_search.add_argument('-c', '--cloud', type=float, default=20.0,
help='Maximum cloud percentage '
'default is 20 perct')
parser_search.add_argument('-d', '--download', action='store_true',
help='Use this flag to download found images')
parser_search.add_argument('--imageprocess', action='store_true',
help='If this flag is used, the images are '
'downloaded and process. Be cautious as it '
'might take a long time to both download and '
'process large batches of images')
parser_search.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pan sharpening takes a long time')
search_subparsers = parser_search.add_subparsers(help='Search commands',
dest='search_subs')
search_pr = search_subparsers.add_parser('pr',
help="Activate paths and rows")
search_pr.add_argument('paths_rows',
metavar='path_row',
type=int,
nargs="+",
help="Provide paths and rows")
search_shapefile = search_subparsers.add_parser('shapefile',
help="Activate Shapefile")
search_shapefile.add_argument('path',
help="Path to shapefile")
search_country = search_subparsers.add_parser('country',
help="Activate country")
search_country.add_argument('name', help="Country name e.g. ARE")
parser_download = subparsers.add_parser('download',
help='Download images from Google Storage')
parser_download.add_argument('scenes',
metavar='sceneID',
nargs="+",
help="Provide Full sceneID, e.g. "
"LC81660392014196LGN00")
parser_process = subparsers.add_parser('process',
help='Process Landsat imagery')
parser_process.add_argument('path',
help='Path to the compressed image file')
parser_process.add_argument('--pansharpen', action='store_true',
help='Whether to also pansharpen the process '
'image. Pan sharpening takes a long time')
parser_process.add_argument('--ndvi', action='store_true',
help='Whether to create a NDVI from the image ')
parser_process.add_argument('--noclouds', action='store_true',
help='Whether to remove clouds and cirrus from '
'the NDVI image. ')
return parser
def main(args):
if args:
if args.subs == 'process':
p = Process(args.path)
if args.pansharpen:
p.full_with_pansharpening()
elif args.ndvi:
if args.noclouds:
p.full(ndvi=True, no_clouds=True)
else:
p.full(ndvi=True)
else:
p.full()
exit("The output is stored at %s." % settings.PROCESSED_IMAGE)
elif args.subs == 'search':
try:
if args.start:
args.start = reformat_date(parse(args.start))
if args.end:
args.end = reformat_date(parse(args.end))
except TypeError:
exit("You date format is incorrect. Please try again!", 1)
s = Search()
if args.search_subs == 'pr':
result = s.search(row_paths=args.paths_rows,
limit=args.limit,
start_date=args.start,
end_date=args.end,
cloud_max=args.cloud)
elif args.search_subs == 'shapefile':
clipper = Clipper()
result = s.search(clipper.shapefile(args.path),
limit=args.limit,
start_date=args.start,
end_date=args.end,
cloud_max=args.cloud)
elif args.search_subs == 'country':
clipper = Clipper()
prs = clipper.country(args.name)
if prs:
result = s.search(prs,
limit=args.limit,
start_date=args.start,
end_date=args.end,
cloud_max=args.cloud)
try:
if result['status'] == 'SUCCESS':
print('%s items were found' % result['total'])
if result['total'] > 100:
exit('Too many results. Please narrow your search')
else:
print(json.dumps(result, sort_keys=True, indent=4))
if args.download:
gs = GsHelper()
print('Starting the download:')
for item in result['results']:
gs.single_download(row=item['row'],
path=item['path'],
name=item['sceneID'])
print("%s images were downloaded"
% result['total_returned'])
if args.imageprocess:
for item in result['results']:
p = Process('%s/%s.tar.bz' % (gs.zip_dir,
item['sceneID']))
if args.pansharpen:
p.full_with_pansharpening()
else:
p.full()
else:
exit("The downloaded images are located here: %s" %
gs.zip_dir)
else:
exit('Done!')
elif result['status'] == 'error':
exit(result['message'])
except KeyError:
exit('Too Many API queries. You can only query DevSeed\'s '
'API 5 times per minute', 1)
elif args.subs == 'download':
gs = GsHelper()
print('Starting the download:')
for scene in args.scenes:
gs.single_download(row=gs.extract_row_path(scene)[1],
path=gs.extract_row_path(scene)[0],
name=scene)
exit("The downloaded images are located here: %s" % gs.zip_dir)
def exit(message, code=0):
print(message)
sys.exit(code)
def package_installed(package):
print("Checking if %s is installed on the system" % package)
installed = not subprocess.call(["which", package])
if installed:
print("%s is installed" % package)
return True
else:
print("You have to install %s first!" % package)
return False
def __main__():
global parser
parser = args_options()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
__main__()
| true | true |
1c32e6f4423ee74d820255630e8ed91535e91b01 | 2,986 | py | Python | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnappingTest.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 61 | 2019-12-19T20:57:24.000Z | 2022-03-29T15:20:51.000Z | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnappingTest.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 54 | 2020-04-05T05:32:47.000Z | 2022-03-15T18:42:33.000Z | tools/ad_map_access_qgis/ad_map_access_qgis/MapSnappingTest.py | woojinjjang/map-1 | d12bb410f03d078a995130b4e671746ace8b6287 | [
"MIT"
] | 31 | 2019-12-20T07:37:39.000Z | 2022-03-16T13:06:16.000Z | # ----------------- BEGIN LICENSE BLOCK ---------------------------------
#
# Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# ----------------- END LICENSE BLOCK -----------------------------------
"..."
import ad_map_access as ad
import Globs
from qgis.gui import QgsMapToolEmitPoint
from qgis.core import QgsField
from PyQt5.QtCore import QVariant
import qgis.PyQt.QtCore
from .QGISLayer import WGS84PointLayer
class MapSnappingTest(QgsMapToolEmitPoint):
    """QGIS map tool that map-matches clicked canvas points.

    Every click is snapped against the loaded AD map via *snapper* and the
    matched lane points are rendered on a dedicated WGS-84 point layer.
    """
    # Presentation of the result layer: name, marker symbol, RGB colour, size.
    TITLE = "Map-Snapped"
    SYMBOL = "diamond"
    COLOR = "226, 226, 0"
    SIZE = "5"
    def __init__(self, action, snapper):
        """Bind the tool to the toolbar *action* and the map *snapper*."""
        QgsMapToolEmitPoint.__init__(self, Globs.iface.mapCanvas())
        self.action = action
        self.snapper = snapper
        self.action.setChecked(False)
        # The result layer is created lazily on first activation.
        self.layer_group = None
        self.layer = None
    def destroy(self):
        """Release the reference to the result layer."""
        self.layer = None
    def activate(self):
        """QGIS callback: tool selected -- ensure the result layer exists."""
        super(MapSnappingTest, self).activate()
        self.__create_layer__()
        self.action.setChecked(True)
        Globs.log.info("Map Snapping Test Activated")
    def deactivate(self):
        """QGIS callback: tool deselected -- clear and refresh the layer."""
        super(MapSnappingTest, self).deactivate()
        self.action.setChecked(False)
        self.layer.remove_all_features()
        self.layer.refresh()
        Globs.log.info("Map Snapping Test Deactivated")
    def canvasReleaseEvent(self, event):  # pylint: disable=invalid-name
        """Snap the clicked position and display every matched lane point."""
        self.layer.remove_all_features()
        # Click position converted to layer coordinates, then to geo and ENU
        # (the ENU point is logged and stored as a feature attribute).
        raw_pt = self.toLayerCoordinates(self.layer.layer, event.pos())
        pt_geo = ad.map.point.createGeoPoint(raw_pt.x(), raw_pt.y(), 0)
        enu_pt = ad.map.point.toENU(pt_geo)
        mmpts = self.snapper.snap(raw_pt)
        Globs.log.info("{} -> {}".format(pt_geo, enu_pt))
        if mmpts is not None:
            for mmpt in mmpts:
                # One feature per matched point, attributes per __create_layer__.
                self.layer.add_ecef(mmpt.matchedPoint, [
                    int(mmpt.lanePoint.paraPoint.laneId), str(mmpt.type), float(mmpt.lanePoint.lateralT), float(mmpt.lanePoint.laneWidth), float(mmpt.lanePoint.laneLength), str(enu_pt)])
        self.layer.refresh()
    def __create_layer__(self):
        """Create the WGS-84 result layer on first use; no-op afterwards."""
        if self.layer is None:
            attrs = [QgsField("Lane Id", QVariant.LongLong),
                     QgsField("Pos Type", QVariant.String),
                     QgsField("Lateral-T", QVariant.Double),
                     QgsField("Lane-Width", QVariant.Double),
                     QgsField("Lane-Length", QVariant.Double),
                     QgsField("ENU Point", QVariant.String)]
            self.layer = WGS84PointLayer(Globs.iface,
                                         self.TITLE,
                                         self.SYMBOL,
                                         self.COLOR,
                                         self.SIZE,
                                         attrs,
                                         self.layer_group)
| 35.547619 | 202 | 0.543202 |
import ad_map_access as ad
import Globs
from qgis.gui import QgsMapToolEmitPoint
from qgis.core import QgsField
from PyQt5.QtCore import QVariant
import qgis.PyQt.QtCore
from .QGISLayer import WGS84PointLayer
class MapSnappingTest(QgsMapToolEmitPoint):
TITLE = "Map-Snapped"
SYMBOL = "diamond"
COLOR = "226, 226, 0"
SIZE = "5"
def __init__(self, action, snapper):
QgsMapToolEmitPoint.__init__(self, Globs.iface.mapCanvas())
self.action = action
self.snapper = snapper
self.action.setChecked(False)
self.layer_group = None
self.layer = None
def destroy(self):
self.layer = None
def activate(self):
super(MapSnappingTest, self).activate()
self.__create_layer__()
self.action.setChecked(True)
Globs.log.info("Map Snapping Test Activated")
def deactivate(self):
super(MapSnappingTest, self).deactivate()
self.action.setChecked(False)
self.layer.remove_all_features()
self.layer.refresh()
Globs.log.info("Map Snapping Test Deactivated")
def canvasReleaseEvent(self, event):
self.layer.remove_all_features()
raw_pt = self.toLayerCoordinates(self.layer.layer, event.pos())
pt_geo = ad.map.point.createGeoPoint(raw_pt.x(), raw_pt.y(), 0)
enu_pt = ad.map.point.toENU(pt_geo)
mmpts = self.snapper.snap(raw_pt)
Globs.log.info("{} -> {}".format(pt_geo, enu_pt))
if mmpts is not None:
for mmpt in mmpts:
self.layer.add_ecef(mmpt.matchedPoint, [
int(mmpt.lanePoint.paraPoint.laneId), str(mmpt.type), float(mmpt.lanePoint.lateralT), float(mmpt.lanePoint.laneWidth), float(mmpt.lanePoint.laneLength), str(enu_pt)])
self.layer.refresh()
def __create_layer__(self):
if self.layer is None:
attrs = [QgsField("Lane Id", QVariant.LongLong),
QgsField("Pos Type", QVariant.String),
QgsField("Lateral-T", QVariant.Double),
QgsField("Lane-Width", QVariant.Double),
QgsField("Lane-Length", QVariant.Double),
QgsField("ENU Point", QVariant.String)]
self.layer = WGS84PointLayer(Globs.iface,
self.TITLE,
self.SYMBOL,
self.COLOR,
self.SIZE,
attrs,
self.layer_group)
| true | true |
1c32e7e594a8a0d9fba4cbb31dabf80383c02ce9 | 1,190 | py | Python | py/py_0183_maximum_product_of_parts.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0183_maximum_product_of_parts.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | py/py_0183_maximum_product_of_parts.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | [
"MIT"
] | null | null | null | # Solution of;
# Project Euler Problem 183: Maximum product of parts
# https://projecteuler.net/problem=183
#
# Let N be a positive integer and let N be split into k equal parts, r = N/k,
# so that N = r + r + . . . + r. Let P be the product of these parts, P = r ×
# r × . . . × r = rk. For example, if 11 is split into five equal parts, 11 =
# 2. 2 + 2. 2 + 2. 2 + 2. 2 + 2. 2, then P = 2. 25 = 51. 53632. Let M(N) =
# Pmax for a given value of N. It turns out that the maximum for N = 11 is
# found by splitting eleven into four equal parts which leads to Pmax =
# (11/4)4; that is, M(11) = 14641/256 = 57. 19140625, which is a terminating
# decimal. However, for N = 8 the maximum is achieved by splitting it into
# three equal parts, so M(8) = 512/27, which is a non-terminating decimal. Let
# D(N) = N if M(N) is a non-terminating decimal and D(N) = -N if M(N) is a
# terminating decimal. For example, ∑ D(N) for 5 ≤ N ≤ 100 is 2438. Find ∑
# D(N) for 5 ≤ N ≤ 10000.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver for problem 183; accepts *n* and returns None."""
    return None
# Harness parameters: n = problem size, i = timeit iterations.
if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 183
    timed.caller(dummy, n, i, prob_id)
| 38.387097 | 79 | 0.633613 |
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 183
timed.caller(dummy, n, i, prob_id)
| true | true |
1c32e9360606b10ab00f8696f91f0d35e73a7e74 | 7,471 | py | Python | fastr/DrivenExtraction.py | remydecoupes/covid19-tweets-mood-tetis | beb3553f7c080f283751d2be14990475008bd82d | [
"CECILL-B"
] | null | null | null | fastr/DrivenExtraction.py | remydecoupes/covid19-tweets-mood-tetis | beb3553f7c080f283751d2be14990475008bd82d | [
"CECILL-B"
] | null | null | null | fastr/DrivenExtraction.py | remydecoupes/covid19-tweets-mood-tetis | beb3553f7c080f283751d2be14990475008bd82d | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 9 14:07:38 2017
For: UMR TETIS RESEARCH UNIT
Author: Gaurav_Shrivastava
"""
import os
import sys
import numpy as np
import pandas as pd
import nltk
import re
import matplotlib as plt
import json
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
import argparse
'''for generating the proper tag for lemmatizer'''
def is_noun(tag):
    """True when *tag* is one of the Penn Treebank noun tags."""
    return tag in ('NN', 'NNS', 'NNP', 'NNPS')
def is_verb(tag):
    """True when *tag* is one of the Penn Treebank verb tags."""
    return tag in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ')
def is_adverb(tag):
    """True when *tag* is one of the Penn Treebank adverb tags."""
    return tag in ('RB', 'RBR', 'RBS')
def is_adjective(tag):
    """True when *tag* is one of the Penn Treebank adjective tags."""
    return tag in ('JJ', 'JJR', 'JJS')
def penn_to_wn(tag):
    """Translate a Penn Treebank POS tag to the WordNet POS constant.

    Returns None for tags outside the four open word classes; order of
    checks (adjective, noun, adverb, verb) matches the original.
    """
    for predicate, wordnet_pos in ((is_adjective, wn.ADJ),
                                   (is_noun, wn.NOUN),
                                   (is_adverb, wn.ADV),
                                   (is_verb, wn.VERB)):
        if predicate(tag):
            return wordnet_pos
    return None
'''converts the text corpora into list of tokens'''
def pre_processing(document):
    """Split *document* into sentences and word-tokenize each one.

    Returns a list of token lists, one per sentence.
    """
    return [nltk.word_tokenize(sentence)
            for sentence in nltk.sent_tokenize(document)]
'''Part of speech tagging for documents to preserve context for lemmatizer'''
def pos_tag(document):
    """Sentence-split, word-tokenize and POS-tag *document*.

    Returns one list of (token, Penn-tag) pairs per sentence.
    """
    tokenized = [nltk.word_tokenize(sentence)
                 for sentence in nltk.sent_tokenize(document)]
    return [nltk.pos_tag(sentence) for sentence in tokenized]
'''Utility function to flatten list of lists into single list'''
def flatten(lists):
    """Flatten one nesting level: all items of the sub-sequences, in order.

    Idiomatic comprehension replacing the original index-based double loop.
    """
    return [element for sublist in lists for element in sublist]
'''reading text corpus'''
def reading_valorcarn_corpus(filename):
    """Read *filename* and split it into documents on the END marker.

    Fix: uses a context manager so the file handle is closed (the
    original left it open).
    """
    with open(filename) as f:
        string = f.read()
    return string.split("##########END##########")
'''reading list of terms'''
def read_file(filename):
    """Read *filename* and return its lines (split on newline).

    Fix: uses a context manager so the file handle is closed (the
    original left it open).
    """
    with open(filename) as f:
        string = f.read()
    return string.split('\n')
def normalise(words, tags):
    """Map each word's stem to the positions where it occurs in *words*.

    *tags* is the parallel (token, Penn-tag) sequence from ``pos_tag``;
    when the tag maps to a WordNet POS, the lemmatizer uses it, then the
    result is stemmed. Uses the module-level ``lemmatizer``/``stemmer``.
    """
    normalised = defaultdict(list)
    for position, raw in enumerate(words):
        token = raw.lower()
        wn_pos = penn_to_wn(tags[position][1])
        if wn_pos is not None:
            token = lemmatizer.lemmatize(token, pos=wn_pos)
        token = stemmer.stem(token)
        normalised[token].append(position)
    return normalised
def list_normalize(words):
    """Map each word's stem to its positions in *words* (no POS context).

    Lowercases, lemmatizes and stems every entry via the module-level
    ``lemmatizer``/``stemmer`` objects.
    """
    normalised = defaultdict(list)
    for position, raw in enumerate(words):
        token = stemmer.stem(lemmatizer.lemmatize(raw.lower()))
        normalised[token].append(position)
    return normalised
def normalize(word):
    """Return the stem of the lemmatized *word* (module-level NLTK objects)."""
    return stemmer.stem(lemmatizer.lemmatize(word))
def extract_singles_variation(words, norm_dict, filtered_words):
    """Collect deduplicated corpus variations for each single-word term."""
    singles = defaultdict(list)
    for term in words:
        if ' ' in term:
            continue  # multi-word terms are handled by extract_couples_variation
        matches = extract_singles(term, norm_dict, filtered_words)
        # Bare except kept from the original: extract_singles may return
        # None, making set(None) raise TypeError.
        try:
            singles[term].append(list(set(matches)))
        except:
            print("Error on extract singles variation with word: " + term)
    return singles
def extract_singles(word, norm_dict, filtered_words):
    """Return every surface form of *word*'s stem in the corpus, or None.

    *norm_dict* maps stems to positions into *filtered_words*.
    """
    stem = normalize(word)
    if stem not in norm_dict:
        return None
    return [filtered_words[position] for position in norm_dict[stem]]
def extract_couples_variation(words, norm_dict, filtered_words, k):
    """Collect corpus variations for every exactly-two-word term in *words*."""
    couples = defaultdict(list)
    for term in words:
        parts = term.split(' ')
        if len(parts) != 2:
            continue  # only space-separated two-word terms are handled
        variations = extract_couples(parts[0], parts[1], norm_dict,
                                     filtered_words, k)
        print(term, variations)
        if variations is not None:
            couples[term].append(variations)
    return couples
def extract_couples(word1, word2, norm_dict, filtered_words, k):
    """Return the distinct corpus phrases where the stems of *word1* and
    *word2* co-occur within *k* tokens, or None when either stem is absent.
    """
    stem1 = normalize(word1)
    stem2 = normalize(word2)
    if stem1 not in norm_dict or stem2 not in norm_dict:
        return None
    phrases = set()
    # Pair up occurrences of the two stems, then render each span as text.
    for span in matching(norm_dict[stem1], norm_dict[stem2], k):
        phrases.add(terms_extract(span, filtered_words))
    return list(phrases)
def terms_extract(extract, filtered_words):
    """Render the inclusive index span ``extract = [start, end]`` as a
    space-joined phrase from *filtered_words*.

    Idiomatic ``str.join`` replacing the original quadratic ``+=`` loop;
    callers always pass start <= end (see ``matching``).
    """
    start, end = extract[0], extract[1]
    return ' '.join(filtered_words[start:end + 1])
def matching(array1, array2, k):
    """Pair positions of *array1* with nearby positions of *array2*.

    For each position p in *array1*, scans the window range(p - k, p + k)
    in order and records [min, max] for the first hit found in *array2*
    (at most one pair per p).
    """
    pairs = []
    positions2 = set(array2)  # O(1) membership; same results as list scan
    for position in array1:
        for candidate in range(position - k, position + k):
            if candidate in positions2:
                pairs.append([min(position, candidate),
                              max(position, candidate)])
                break
    return pairs
def reading_corpus(filename):
    """Return the full text content of *filename*.

    Fix: uses a context manager so the file handle is closed (the
    original left it open).
    """
    with open(filename) as f:
        return f.read()
def main():
    """CLI entry point: tokenise the input corpus, normalise it, and write
    the extracted terminology variations to
    ``driven_extraction_version_1.json``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="give input text corpus")
    parser.add_argument("list", help="input terminology list")
    parser.add_argument("-k", "--K_value", help="give K_value(int), Default is 3", default=3)
    parser.add_argument("-s", "--size", help="slicing the size of input terminology list to: default = 100", default=100)
    args = parser.parse_args()
    filename = args.input
    if not os.path.isfile(filename):
        # Fix: escaped the backslash; the printed text is unchanged.
        print('Give correct path to text corpora ""\\(-_-)/""')
        return None
    docs = reading_corpus(filename)
    words = flatten(pre_processing(docs))
    tags = flatten(pos_tag(docs))
    # The NLTK helpers used by normalise()/normalize() are module globals.
    global stemmer, lemmatizer
    stemmer = nltk.stem.porter.PorterStemmer()
    lemmatizer = nltk.WordNetLemmatizer()
    filtered_words = [word for word in words if word not in stopwords.words('english')]
    filtered_tags = [word for word in tags if word[0] not in stopwords.words('english')]
    normalised = normalise(filtered_words, filtered_tags)
    list_name = args.list
    # Fix: the original re-checked `filename` here, so a bad terminology
    # list path was never detected.
    if not os.path.isfile(list_name):
        print('Give correct path to input list ""\\(-_-)/"" check --help')
        return None
    lists = read_file(list_name)
    # Kept for its side effects (error prints); the value is unused below,
    # matching the original behaviour.
    singles = extract_singles_variation(lists, normalised, filtered_words)
    # Re-normalise over the full (unfiltered) token stream, as the
    # original did for the couples pass.
    normalised = normalise(words, tags)
    size = int(args.size)
    K_value = int(args.K_value)
    couples = extract_couples_variation(lists[:size], normalised, words, K_value)
    # Drop terms whose first variation list contains a single phrase.
    for key in set(couples.keys()):
        if len(couples[key][0]) == 1:
            del couples[key]
    # Fix: context manager so the output file is flushed and closed.
    with open('driven_extraction_version_1.json', 'w') as out:
        out.write(json.dumps(couples, indent=4))
# Run the extraction pipeline when invoked as a script.
if __name__ == '__main__':
    main()
import os
import sys
import numpy as np
import pandas as pd
import nltk
import re
import matplotlib as plt
import json
from collections import defaultdict
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
import argparse
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wn.ADJ
elif is_noun(tag):
return wn.NOUN
elif is_adverb(tag):
return wn.ADV
elif is_verb(tag):
return wn.VERB
return None
def pre_processing(document):
sentences = nltk.sent_tokenize(document)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
return sentences
def pos_tag(document):
sentences = nltk.sent_tokenize(document)
sentences = [nltk.word_tokenize(sent) for sent in sentences]
sentences = [nltk.pos_tag(sent) for sent in sentences]
return sentences
def flatten(lists):
newlist = []
for item in lists:
for index in range(len(item)):
newlist.append(item[index])
return newlist
def reading_valorcarn_corpus(filename):
f = open(filename)
string = f.read()
docs = string.split("##########END##########")
return docs
def read_file(filename):
f = open(filename)
string = f.read()
word = string.split('\n')
return word
def normalise(words,tags):
normalised = defaultdict(list)
counter = 0
for i in range(len(words)):
word = words[i].lower()
if penn_to_wn(tags[i][1]) != None:
word = lemmatizer.lemmatize(word,pos = penn_to_wn(tags[i][1]))
word = stemmer.stem(word)
normalised[word].append(counter)
counter = counter + 1
return normalised
def list_normalize(words):
normalised = defaultdict(list)
counter = 0
for i in range(len(words)):
word = words[i].lower()
word = lemmatizer.lemmatize(word)
word = stemmer.stem(word)
normalised[word].append(counter)
counter = counter + 1
return normalised
def normalize(word):
    """Lemmatize (default POS) then stem a single word.

    Relies on module-level `stemmer` and `lemmatizer` (set up in main()).
    """
    return stemmer.stem(lemmatizer.lemmatize(word))
def extract_singles_variation(words, norm_dict, filtered_words):
    """For every single-word term in *words*, record its distinct corpus
    occurrences (as returned by extract_singles()).

    Fix: the original wrapped the append in a bare `except:` -- the only
    realistic failure is extract_singles() returning None (word absent from
    the corpus), which made `set(None)` raise. Handle that case explicitly
    so genuine bugs are no longer silently swallowed.
    """
    singles = defaultdict(list)
    for word in words:
        if ' ' in word:
            continue  # multi-word terms are handled by extract_couples_variation
        occurrences = extract_singles(word, norm_dict, filtered_words)
        if occurrences is None:
            print("Error on extract singles variation with word: " + word)
            continue
        singles[word].append(list(set(occurrences)))
    return singles
def extract_singles(word, norm_dict, filtered_words):
    """Return every corpus token at the positions recorded for the
    normalized form of *word*, or None when the word is absent."""
    key = normalize(word)
    if key not in norm_dict:
        return None
    return [filtered_words[position] for position in norm_dict[key]]
def extract_couples_variation(words, norm_dict, filtered_words, k):
    """For every exactly-two-word term in *words*, collect the corpus
    snippets where both words co-occur within *k* tokens."""
    couples = defaultdict(list)
    for word in words:
        if ' ' not in word:
            continue
        parts = word.split(' ')
        if len(parts) != 2:
            continue  # terms of three or more words are not handled
        extracted = extract_couples(parts[0], parts[1], norm_dict,
                                    filtered_words, k)
        print(word, extracted)
        if extracted is not None:
            couples[word].append(extracted)
    return couples
def extract_couples(word1, word2, norm_dict, filtered_words, k):
    """Return the unique corpus snippets where the normalized forms of
    *word1* and *word2* occur within *k* tokens of each other, or None
    when either word is absent from *norm_dict*."""
    key1 = normalize(word1)
    key2 = normalize(word2)
    if key1 not in norm_dict or key2 not in norm_dict:
        return None
    snippets = set()
    for span in matching(norm_dict[key1], norm_dict[key2], k):
        snippets.add(terms_extract(span, filtered_words))
    return list(snippets)
def terms_extract(extract, filtered_words):
    """Join the tokens in the inclusive index range extract=[start, end]
    into a single space-separated string."""
    start, end = extract[0], extract[1]
    return ' '.join(filtered_words[start:end + 1])
def matching(array1, array2, k):
    """Pair each position in *array1* with the first nearby position found
    in *array2*, returned as [min, max] spans (at most one per position).

    The scanned window is [p - k, p + k) -- the upper bound is exclusive,
    so a partner exactly k tokens to the right is missed. This looks like
    an off-by-one but is preserved as-is; TODO confirm intent. A position
    present in both arrays matches itself ([p, p]).
    """
    extracted = []
    for position in array1:
        partner = next((i for i in range(position - k, position + k)
                        if i in array2), None)
        if partner is not None:
            extracted.append([min(position, partner), max(position, partner)])
    return extracted
def reading_corpus(filename):
    """Return the entire contents of *filename* as one string.

    Fix: the original never closed the file; use a context manager.
    """
    with open(filename) as f:
        return f.read()
def main():
    """Command-line driver: read a corpus and a terminology list, then dump
    the co-occurrence extraction to driven_extraction_version_1.json.

    Fixes relative to the original:
    * the second path check tested `filename` (the corpus) instead of
      `list_name` (the terminology list) -- copy/paste bug;
    * stopwords.words('english') was re-evaluated for every token; it is
      now computed once into a set (O(1) membership);
    * the output file is written via a context manager instead of a
      never-closed open().write().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="give input text corpus")
    parser.add_argument("list", help="input terminology list")
    parser.add_argument("-k", "--K_value", help="give K_value(int), Default is 3", default=3)
    parser.add_argument("-s", "--size", help="slicing the size of input terminology list to: default = 100", default=100)
    args = parser.parse_args()
    filename = args.input
    if not os.path.isfile(filename):
        print('Give correct path to text corpora ""\(-_-)/""')
        return None
    docs = reading_corpus(filename)
    words = flatten(pre_processing(docs))
    tags = flatten(pos_tag(docs))
    # The normalisation helpers read these module-level objects.
    global stemmer, lemmatizer
    stemmer = nltk.stem.porter.PorterStemmer()
    lemmatizer = nltk.WordNetLemmatizer()
    stop = set(stopwords.words('english'))
    filtered_words = [word for word in words if word not in stop]
    filtered_tags = [word for word in tags if word[0] not in stop]
    normalised = normalise(filtered_words, filtered_tags)
    list_name = args.list
    # BUG FIX: the original re-checked `filename` here instead of `list_name`.
    if not os.path.isfile(list_name):
        print('Give correct path to input list ""\(-_-)/"" check --help')
        return None
    lists = read_file(list_name)
    singles = extract_singles_variation(lists, normalised, filtered_words)
    # Re-index over the unfiltered corpus for couple extraction.
    normalised = normalise(words, tags)
    size = int(args.size)
    K_value = int(args.K_value)
    couples = extract_couples_variation(lists[:size], normalised, words, K_value)
    # Drop terms whose only evidence is a single snippet.
    for key in list(couples.keys()):
        if len(couples[key][0]) == 1:
            del couples[key]
    with open('driven_extraction_version_1.json', 'w') as out:
        out.write(json.dumps(couples, indent=4))
# Script entry point: run the terminology extraction pipeline.
if __name__ == '__main__':
    main()
1c32e9a0925ed2999d02030f82708738ca6d9199 | 22,061 | py | Python | language/install.py | ritabt/legion | 62d9a513bb85050995b1837d132faddce9c1a8a4 | [
"Apache-2.0"
] | null | null | null | language/install.py | ritabt/legion | 62d9a513bb85050995b1837d132faddce9c1a8a4 | [
"Apache-2.0"
] | null | null | null | language/install.py | ritabt/legion | 62d9a513bb85050995b1837d132faddce9c1a8a4 | [
"Apache-2.0"
] | 1 | 2021-01-07T22:07:18.000Z | 2021-01-07T22:07:18.000Z | #!/usr/bin/env python
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse, json, multiprocessing, os, platform, shutil, subprocess, sys
# Requires:
# * Terra-compatible LLVM installation on PATH
# Pick the input() builtin matching the running interpreter so the RDIR
# prompt works on both Python 2 and 3.
_version = sys.version_info.major
if _version == 2: # Python 2.x:
    _input = raw_input
elif _version == 3: # Python 3.x:
    _input = input
else:
    raise Exception('Incompatible Python version')
# allow the make executable name to be overridden by the environment
make_exe = os.environ.get('MAKE', 'make')
# Per-OS shared-library suffix; unsupported platforms abort immediately.
os_name = platform.system()
if os_name == 'Linux':
    dylib_ext = '.so'
elif os_name == 'Darwin':
    dylib_ext = '.dylib'
elif os_name == 'FreeBSD':
    dylib_ext = '.so'
    make_exe = os.environ.get('MAKE', 'gmake') # default needs to be GNU make
else:
    raise Exception('install.py script does not work on %s' % platform.system())
def download(dest_path, url, sha1):
    """Download *url* to *dest_path* with wget and verify its SHA-1.

    Fix: Popen.communicate() on a binary pipe requires bytes on Python 3;
    the original passed a str, which raises TypeError. Encoding keeps
    Python 2 behavior unchanged.
    """
    dest_dir = os.path.dirname(dest_path)
    dest_file = os.path.basename(dest_path)
    subprocess.check_call(['wget', '-O', dest_path, url])
    shasum = subprocess.Popen(
        ['shasum', '--check'], stdin=subprocess.PIPE, cwd=dest_dir)
    shasum.communicate(('%s %s' % (sha1, dest_file)).encode('utf-8'))
    assert shasum.wait() == 0
def git_clone(repo_dir, url, branch=None):
    """Clone *url* into *repo_dir*, checking out *branch* when given."""
    cmd = ['git', 'clone']
    if branch:
        cmd.extend(['-b', branch])
    cmd.extend([url, repo_dir])
    subprocess.check_call(cmd)
def git_update(repo_dir):
    """Fast-forward-only `git pull` inside *repo_dir*."""
    subprocess.check_call(['git', 'pull', '--ff-only'], cwd=repo_dir)
def git_submodule_update(repo_dir):
    """Initialize and update the git submodules of *repo_dir*."""
    subprocess.check_call(['git', 'submodule', 'update', '--init'],
                          cwd=repo_dir)
def load_json_config(filename):
    """Parse *filename* as JSON; return None when the file cannot be read."""
    try:
        with open(filename, 'r') as handle:
            return json.load(handle)
    except IOError:
        return None
def dump_json_config(filename, value):
    """Serialize *value* as JSON into *filename*, overwriting any content."""
    with open(filename, 'w') as handle:
        return json.dump(value, handle)
prompt_text = '''
RDIR is an optional compiler plugin for Regent which provides support
for dataflow optimizations (most notably control replication). RDIR
support is opt-in because RDIR's license is different from that of
Regent (thus this prompt). Specifically:
* portions of RDIR are licensed under BSD
* other portions of RDIR are dual-licensed under BSD and Apache
(Regent itself is licensed entirely under Apache.)
You may choose to use RDIR automatically (select "auto" below),
manually, or not at all. Your preference will be saved. You can change
your mind at any time by re-running this script with the "--rdir"
parameter.
'''
def install_rdir(rdir, legion_dir, regent_dir):
    """Resolve the user's RDIR preference, prompting interactively if needed.

    rdir is one of 'prompt', 'auto', 'manual', 'skip', 'never', or None;
    None means "reuse the choice saved in .rdir.json, else prompt".
    'auto' also initializes the RDIR git submodule. Any answer except
    'skip' is persisted for future runs.
    """
    config_filename = os.path.join(regent_dir, '.rdir.json')
    if rdir is None:
        rdir = load_json_config(config_filename)
        if rdir is None: rdir = 'prompt'
    if rdir == 'prompt':
        print(prompt_text)
        while rdir not in ['auto', 'manual', 'never']:
            rdir = _input('Enable RDIR? (auto/manual/never) ')
    assert rdir in ['auto', 'manual', 'skip', 'never']
    # 'auto' pulls the RDIR submodule into the Legion tree.
    if rdir == 'auto':
        git_submodule_update(legion_dir)
    if rdir != 'skip':
        dump_json_config(config_filename, rdir)
def build_terra(terra_dir, terra_branch, use_cmake, cmake_exe, thread_count, llvm):
    """Build the in-tree Terra checkout, via CMake or plain make.

    When use_cmake is None the build system is auto-detected: CMake unless
    a previous Makefile-style build (build/main.o) is found without a
    CMake cache.
    """
    build_dir = os.path.join(terra_dir, 'build')
    release_dir = os.path.join(terra_dir, 'release')
    if use_cmake is None:
        build_detected = os.path.exists(os.path.join(build_dir, 'main.o'))
        cmake_detected = os.path.exists(os.path.join(build_dir, 'CMakeCache.txt'))
        use_cmake = cmake_detected or not build_detected
        if not use_cmake:
            print('Detected previous Makefile build in Terra, disabling Terra CMake build...')
    flags = []
    if llvm:
        # LLVM re-export only works with the Makefile build.
        assert not use_cmake, "LLVM mode not supported with Terra CMake build, see https://github.com/zdevito/terra/issues/394"
        flags.extend(['REEXPORT_LLVM_COMPONENTS=irreader mcjit x86'])
    if use_cmake:
        # Only configure once; later runs reuse the existing cache.
        if not os.path.exists(os.path.join(build_dir, 'CMakeCache.txt')):
            subprocess.check_call(
                [cmake_exe, '..', '-DCMAKE_INSTALL_PREFIX=%s' % release_dir],
                cwd=build_dir)
        subprocess.check_call(
            [make_exe, 'install', '-j', str(thread_count)],
            cwd=build_dir)
    else:
        subprocess.check_call(
            [make_exe, 'all', '-j', str(thread_count)] + flags,
            cwd=terra_dir)
def install_terra(terra_dir, terra_url, terra_branch, use_cmake, cmake_exe,
                  external_terra_dir, thread_count, llvm):
    """Provision Terra at *terra_dir*.

    Three paths: symlink an external installation (external_terra_dir),
    reuse an existing symlink, or clone/update the Terra repository and
    build it from source. URL/branch may only be given on first clone.
    """
    if external_terra_dir is not None:
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch are incompatible with setting an external installation directory')
        external_terra_dir = os.path.expanduser(external_terra_dir)
        if not os.path.isdir(external_terra_dir):
            print('Error: No such directory %s' %
                  external_terra_dir)
            sys.exit(1)
        if os.path.lexists(terra_dir):
            # A real (non-symlink) checkout must not be clobbered.
            if not os.path.islink(terra_dir):
                print('Error: Attempting build with external Terra when internal Terra')
                print('already exists. Please remove the following directory to continue with')
                print('an external Terra installation.')
                print('  %s' % terra_dir)
                sys.exit(1)
            # Re-point the link if it targets a different location.
            if os.path.realpath(terra_dir) != os.path.realpath(external_terra_dir):
                os.unlink(terra_dir)
                os.symlink(external_terra_dir, terra_dir)
        else:
            print(external_terra_dir, terra_dir)
            os.symlink(external_terra_dir, terra_dir)
        return
    elif os.path.islink(terra_dir):
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch are incompatible with setting an external installation directory')
        print('Reusing existing external Terra:')
        print('  %s' % os.path.realpath(terra_dir))
        print()
        return
    if not os.path.exists(terra_dir):
        if terra_url is None:
            terra_url = 'https://github.com/terralang/terra.git'
        if terra_branch is None:
            terra_branch = 'master'
        git_clone(terra_dir, terra_url, terra_branch)
    else:
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch must be set on first install, please delete the terra directory and try again')
        git_update(terra_dir)
    build_terra(terra_dir, terra_branch, use_cmake, cmake_exe, thread_count, llvm)
def install_luarocks(terra_dir, luarocks_dir):
    """Clone, configure, and install LuaRocks against the Terra interpreter,
    then install the LDoc rock. (Currently disabled at the call site.)"""
    if not os.path.exists(luarocks_dir):
        # For now we need to use Git until the following patch makes
        # it into a release:
        # https://github.com/luarocks/luarocks/commit/708fed20d013e69fd79d80f0b59a45a25eed3a00
        luarocks_url = 'https://github.com/luarocks/luarocks.git'
        luarocks_branch = 'master'
        git_clone(luarocks_dir, luarocks_url, luarocks_branch)
    # Terra's layout differs between Makefile and CMake builds.
    if os.path.exists(os.path.join(terra_dir, 'bin', 'terra')):
        terra_prefix = os.path.join(terra_dir)
    elif os.path.exists(os.path.join(terra_dir, 'release', 'bin', 'terra')):
        terra_prefix = os.path.join(terra_dir, 'release')
    else:
        raise Exception('Unable to determine correct prefix for LuaRocks installation')
    luarocks_prefix = os.path.join(luarocks_dir, 'install')
    luarocks_exe = os.path.join(luarocks_prefix, 'bin', 'luarocks')
    if not os.path.exists(luarocks_exe):
        subprocess.check_call(
            [os.path.join(luarocks_dir, 'configure'),
             '--prefix=%s' % luarocks_prefix,
             '--with-lua=%s' % terra_prefix,
             '--with-lua-include=%s' % os.path.join(terra_prefix, 'include', 'terra'),
             '--with-lua-interpreter=terra'],
            cwd=luarocks_dir)
        # Hack: This throws an error but we'll keep going anyway...
        subprocess.call(['make'], cwd=luarocks_dir)
        subprocess.check_call(['make', 'install'], cwd=luarocks_dir)
    ldoc_exe = os.path.join(luarocks_prefix, 'bin', 'ldoc')
    if not os.path.exists(ldoc_exe):
        ldoc_url = 'https://raw.githubusercontent.com/StanfordLegion/LDoc/master/ldoc-scm-2.rockspec'
        subprocess.check_call([luarocks_exe, 'install', ldoc_url])
def symlink(from_path, to_path):
    """Create a symlink at *to_path* unless something (even a dangling
    link) already exists there; idempotent."""
    if os.path.lexists(to_path):
        return
    os.symlink(from_path, to_path)
def install_bindings(regent_dir, legion_dir, bindings_dir, python_bindings_dir, runtime_dir,
                     cmake, cmake_exe, build_dir,
                     debug, cuda, openmp, python, llvm, hdf, spy,
                     gasnet, gasnet_dir, conduit, clean_first,
                     extra_flags, thread_count, verbose):
    """Build the Regent bindings (and optionally the Python bindings).

    Two build paths: CMake (configures and builds the whole Legion tree
    under regent/build or an external build_dir) or plain make in the
    bindings directories. Feature flags are translated into the
    corresponding CMake -D options or make variables.
    """
    # Don't blow away an existing directory
    assert not (clean_first and build_dir is not None)
    if cmake:
        regent_build_dir = os.path.join(regent_dir, 'build')
        if build_dir is None:
            build_dir = regent_build_dir
        else:
            try:
                # check if the link is already there (and pointing at the right
                # thing) first
                if not os.path.islink(regent_build_dir) or (os.readlink(regent_build_dir) != build_dir):
                    os.symlink(build_dir, regent_build_dir)
            except OSError:
                print('Error: Attempting to build with an external build directory when an')
                print('internal (or different external) build directory already exists. Please')
                print('remove the following directory to continue with the installation:')
                print('  %s' % regent_build_dir)
                sys.exit(1)
        if clean_first:
            shutil.rmtree(build_dir)
        if not os.path.exists(build_dir):
            os.mkdir(build_dir)
        cc_flags = os.environ['CC_FLAGS'] if 'CC_FLAGS' in os.environ else ''
        flags = (
            ['-DCMAKE_BUILD_TYPE=%s' % ('Debug' if debug else 'Release'),
             '-DLegion_USE_CUDA=%s' % ('ON' if cuda else 'OFF'),
             '-DLegion_USE_OpenMP=%s' % ('ON' if openmp else 'OFF'),
             '-DLegion_USE_Python=%s' % ('ON' if python else 'OFF'),
             '-DLegion_USE_LLVM=%s' % ('ON' if llvm else 'OFF'),
             '-DLegion_USE_GASNet=%s' % ('ON' if gasnet else 'OFF'),
             '-DLegion_USE_HDF5=%s' % ('ON' if hdf else 'OFF'),
             '-DLegion_SPY=%s' % ('ON' if spy else 'OFF'),
             '-DLegion_BUILD_BINDINGS=ON',
             '-DBUILD_SHARED_LIBS=ON',
            ] +
            extra_flags +
            (['-DGASNet_ROOT_DIR=%s' % gasnet_dir] if gasnet_dir is not None else []) +
            (['-DGASNet_CONDUIT=%s' % conduit] if conduit is not None else []) +
            (['-DCMAKE_CXX_COMPILER=%s' % os.environ['CXX']] if 'CXX' in os.environ else []) +
            (['-DCMAKE_CXX_FLAGS=%s' % cc_flags] if cc_flags else []))
        if llvm:
            # mess with a few things so that Realm uses terra's LLVM
            flags.append('-DLegion_ALLOW_MISSING_LLVM_LIBS=ON')
            flags.append('-DLegion_LINK_LLVM_LIBS=OFF')
            # pass through LLVM_CONFIG, if set
            if 'LLVM_CONFIG' in os.environ:
                flags.append('-DLLVM_CONFIG_EXECUTABLE=%s' % os.environ['LLVM_CONFIG'])
        make_flags = ['VERBOSE=1'] if verbose else []
        try:
            subprocess.check_output([cmake_exe, '--version'])
        except OSError:
            print('Error: CMake is not installed or otherwise not executable. Please check')
            print('your CMake installation and try again. You can use the --with-cmake flag')
            print('to specify the CMake executable if it is not on PATH.')
            print()
            print('Attempted to execute: %s' % cmake_exe)
            sys.exit(1)
        subprocess.check_call(
            [cmake_exe] + flags + [legion_dir],
            cwd=build_dir)
        subprocess.check_call(
            [make_exe] + make_flags + ['-j', str(thread_count)],
            cwd=build_dir)
    else:
        flags = (
            ['LG_RT_DIR=%s' % runtime_dir,
             'DEFINE_HEADERS_DIR=%s' % bindings_dir, # otherwise Python build recompiles everything
             'DEBUG=%s' % (1 if debug else 0),
             'USE_CUDA=%s' % (1 if cuda else 0),
             'USE_OPENMP=%s' % (1 if openmp else 0),
             'USE_PYTHON=%s' % (1 if python else 0),
             'USE_LLVM=%s' % (1 if llvm else 0),
             'USE_GASNET=%s' % (1 if gasnet else 0),
             'USE_HDF=%s' % (1 if hdf else 0),
             'USE_SPY=%s' % (1 if spy else 0),
            ] +
            extra_flags +
            (['GASNET=%s' % gasnet_dir] if gasnet_dir is not None else []) +
            (['CONDUIT=%s' % conduit] if conduit is not None else []) +
            (['GCC=%s' % os.environ['CXX']] if 'CXX' in os.environ else []))
        if clean_first:
            subprocess.check_call(
                [make_exe] + flags + ['clean'],
                cwd=bindings_dir)
            if python:
                subprocess.check_call(
                    [make_exe] + flags + ['clean'],
                    cwd=python_bindings_dir)
        subprocess.check_call(
            [make_exe] + flags + ['-j', str(thread_count)],
            cwd=bindings_dir)
        if python:
            subprocess.check_call(
                [make_exe] + flags + ['-j', str(thread_count)],
                cwd=python_bindings_dir)
    # This last bit is necessary because Mac OS X shared libraries
    # have paths hard-coded into them, and in this case those paths
    # are coming out wrong. Therefore, we need to fix them to use the
    # search path again so our scripts can find them properly.
    #
    # You can sanity check that this step actually worked with the
    # commands:
    #
    # otool -L libregent.so
    # ./regent.py
    #   =package.loadlib('libregent.so', 'init')
    if os_name == 'Darwin':
        subprocess.check_call(
            ['install_name_tool', '-change',
             '/usr/local/lib/libluajit-5.1.2.dylib', 'libluajit-5.1.2.dylib',
             os.path.join(bindings_dir, 'libregent.dylib')])
def get_cmake_config(cmake, regent_dir, default=None):
    """Resolve whether to build with CMake and persist the choice.

    Falls back to the value saved in regent_dir/.cmake.json, then to
    *default*; the resolved boolean is always written back.
    """
    config_filename = os.path.join(regent_dir, '.cmake.json')
    if cmake is None:
        saved = load_json_config(config_filename)
        cmake = default if saved is None else saved
    assert cmake in [True, False]
    dump_json_config(config_filename, cmake)
    return cmake
def install(gasnet=False, cuda=False, openmp=False, python=False, llvm=False, hdf=False,
            spy=False, conduit=None, cmake=None, rdir=None,
            cmake_exe=None, cmake_build_dir=None,
            terra_url=None, terra_branch=None, terra_use_cmake=None, external_terra_dir=None,
            gasnet_dir=None, debug=False, clean_first=True, extra_flags=None,
            thread_count=None, verbose=False):
    """Top-level install flow: resolve RDIR, provision Terra, build bindings.

    Keyword arguments mirror the command-line flags defined in driver().
    clean_first may be None, meaning "clean unless building with CMake".

    Fix: extra_flags previously defaulted to a shared mutable list ([]);
    use the None-sentinel idiom instead (behavior for callers unchanged).
    """
    if extra_flags is None:
        extra_flags = []
    regent_dir = os.path.dirname(os.path.realpath(__file__))
    legion_dir = os.path.dirname(regent_dir)
    cmake = get_cmake_config(cmake, regent_dir, default=False)
    if clean_first is None:
        clean_first = not cmake
    if not cmake and cmake_build_dir is not None:
        raise Exception('Build directory is only permitted when building with CMake')
    if clean_first and cmake_build_dir is not None:
        raise Exception('Cannot clean a pre-existing build directory')
    if thread_count is None:
        thread_count = multiprocessing.cpu_count()
    # Grab LG_RT_DIR from the environment if available, otherwise
    # assume we're running relative to our own location.
    runtime_dir = os.path.join(legion_dir, 'runtime')
    if 'LG_RT_DIR' in os.environ:
        runtime_dir = os.path.realpath(os.environ['LG_RT_DIR'])
    install_rdir(rdir, legion_dir, regent_dir)
    terra_dir = os.path.join(regent_dir, 'terra')
    install_terra(terra_dir, terra_url, terra_branch, terra_use_cmake, cmake_exe,
                  external_terra_dir, thread_count, llvm)
    # luarocks_dir = os.path.join(regent_dir, 'luarocks')
    # install_luarocks(terra_dir, luarocks_dir)
    bindings_dir = os.path.join(legion_dir, 'bindings', 'regent')
    python_bindings_dir = os.path.join(legion_dir, 'bindings', 'python')
    install_bindings(regent_dir, legion_dir, bindings_dir, python_bindings_dir, runtime_dir,
                     cmake, cmake_exe, cmake_build_dir,
                     debug, cuda, openmp, python, llvm, hdf, spy,
                     gasnet, gasnet_dir, conduit, clean_first,
                     extra_flags, thread_count, verbose)
def driver():
    """Parse the command-line flags (each defaulting from the matching
    USE_* / build environment variable) and invoke install()."""
    parser = argparse.ArgumentParser(
        description='Install Regent front end.')
    parser.add_argument(
        '--terra-url', dest='terra_url', metavar='URL', required=False,
        help='URL to Terra repository to clone (optional).')
    parser.add_argument(
        '--terra-branch', dest='terra_branch', metavar='BRANCH', required=False,
        help='Name of Terra branch to clone (optional).')
    parser.add_argument(
        '--terra-cmake', dest='terra_use_cmake', action='store_true', required=False,
        default=None,
        help='Build Terra with CMake.')
    parser.add_argument(
        '--no-terra-cmake', dest='terra_use_cmake', action='store_false', required=False,
        default=None,
        help="Don't build Terra with CMake (instead use GNU Make).")
    parser.add_argument(
        '--with-terra', dest='external_terra_dir', metavar='DIR', required=False,
        help='Path to Terra installation directory (optional).')
    parser.add_argument(
        '--debug', dest='debug', action='store_true', required=False,
        default=os.environ.get('DEBUG') == '1',
        help='Build Legion with debugging enabled.')
    parser.add_argument(
        '--gasnet', dest='gasnet', action='store_true', required=False,
        default=os.environ.get('USE_GASNET') == '1',
        help='Build Legion with GASNet.')
    parser.add_argument(
        '--with-gasnet', dest='gasnet_dir', metavar='DIR', required=False,
        default=os.environ.get('GASNET'),
        help='Path to GASNet installation directory.')
    parser.add_argument(
        '--cuda', dest='cuda', action='store_true', required=False,
        default=os.environ.get('USE_CUDA') == '1',
        help='Build Legion with CUDA.')
    parser.add_argument(
        '--openmp', dest='openmp', action='store_true', required=False,
        default=os.environ.get('USE_OPENMP') == '1',
        help='Build Legion with OpenMP support.')
    parser.add_argument(
        '--python', dest='python', action='store_true', required=False,
        default=os.environ.get('USE_PYTHON') == '1',
        help='Build Legion with Python support.')
    parser.add_argument(
        '--llvm', dest='llvm', action='store_true', required=False,
        default=os.environ.get('USE_LLVM') == '1',
        help='Build Legion (and compatible Terra) with LLVM support.')
    parser.add_argument(
        '--hdf5', '--hdf', dest='hdf', action='store_true', required=False,
        default=os.environ.get('USE_HDF') == '1',
        help='Build Legion with HDF.')
    parser.add_argument(
        '--spy', dest='spy', action='store_true', required=False,
        default=os.environ.get('USE_SPY') == '1',
        help='Build Legion with detailed Legion Spy enabled.')
    parser.add_argument(
        '--conduit', dest='conduit', action='store', required=False,
        default=os.environ.get('CONDUIT'),
        help='Build Legion with specified GASNet conduit.')
    parser.add_argument(
        '--cmake', dest='cmake', action='store_true', required=False,
        default=os.environ['USE_CMAKE'] == '1' if 'USE_CMAKE' in os.environ else None,
        help='Build Legion with CMake.')
    parser.add_argument(
        '--no-cmake', dest='cmake', action='store_false', required=False,
        help="Don't build Legion with CMake (instead use GNU Make).")
    parser.add_argument(
        '--with-cmake', dest='cmake_exe', metavar='EXE', required=False,
        default='cmake',
        help='Path to CMake executable (if not on PATH).')
    parser.add_argument(
        '--with-cmake-build', dest='cmake_build_dir', metavar='DIR', required=False,
        help='Path to CMake build directory (optional).')
    parser.add_argument(
        '--rdir', dest='rdir', required=False,
        choices=['prompt', 'auto', 'manual', 'skip', 'never'], default=None,
        help='Enable RDIR compiler plugin.')
    parser.add_argument(
        '--clean', dest='clean_first', action='store_true', required=False,
        default=None,
        help='Clean before build.')
    parser.add_argument(
        '--no-clean', '--noclean', dest='clean_first', action='store_false', required=False,
        help='Skip clean before build.')
    parser.add_argument(
        '--extra', dest='extra_flags', action='append', required=False,
        default=[],
        help='Extra flags for make command.')
    parser.add_argument(
        '-j', dest='thread_count', nargs='?', type=int,
        help='Number threads used to compile.')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', required=False,
        help='Enable verbose build output.')
    args = parser.parse_args()
    # Argument dests are named to match install()'s keyword parameters.
    install(**vars(args))
if __name__ == '__main__':
driver()
| 43.427165 | 127 | 0.627351 |
from __future__ import print_function
import argparse, json, multiprocessing, os, platform, shutil, subprocess, sys
# Pick the input() builtin matching the running interpreter (Py2 vs Py3).
_version = sys.version_info.major
if _version == 2:
    _input = raw_input
elif _version == 3:
    _input = input
else:
    raise Exception('Incompatible Python version')
# MAKE environment variable can override the make executable name.
make_exe = os.environ.get('MAKE', 'make')
# Per-OS shared-library suffix; FreeBSD additionally needs GNU make.
os_name = platform.system()
if os_name == 'Linux':
    dylib_ext = '.so'
elif os_name == 'Darwin':
    dylib_ext = '.dylib'
elif os_name == 'FreeBSD':
    dylib_ext = '.so'
    make_exe = os.environ.get('MAKE', 'gmake')
else:
    raise Exception('install.py script does not work on %s' % platform.system())
def download(dest_path, url, sha1):
    """Download *url* to *dest_path* with wget and verify its SHA-1 via shasum."""
    dest_dir = os.path.dirname(dest_path)
    dest_file = os.path.basename(dest_path)
    subprocess.check_call(['wget', '-O', dest_path, url])
    shasum = subprocess.Popen(
        ['shasum', '--check'], stdin=subprocess.PIPE, cwd=dest_dir)
    # NOTE(review): communicate() is given a str here, but a binary pipe on
    # Python 3 expects bytes -- confirm and .encode() if this path is used.
    shasum.communicate('%s %s' % (sha1, dest_file))
    assert shasum.wait() == 0
def git_clone(repo_dir, url, branch=None):
    """Clone *url* into *repo_dir*, checking out *branch* when given."""
    subprocess.check_call(['git', 'clone'] +
                          (['-b', branch] if branch else []) +
                          [url, repo_dir])
def git_update(repo_dir):
    """Fast-forward-only `git pull` inside *repo_dir*."""
    subprocess.check_call(
        ['git', 'pull', '--ff-only'],
        cwd=repo_dir)
def git_submodule_update(repo_dir):
    """Initialize and update the git submodules of *repo_dir*."""
    subprocess.check_call(
        ['git', 'submodule', 'update', '--init'],
        cwd=repo_dir)
def load_json_config(filename):
    """Parse *filename* as JSON; return None when the file cannot be read."""
    try:
        with open(filename, 'r') as f:
            return json.load(f)
    except IOError:
        return None
def dump_json_config(filename, value):
    """Serialize *value* as JSON into *filename*, overwriting any content."""
    with open(filename, 'w') as f:
        return json.dump(value, f)
prompt_text = '''
RDIR is an optional compiler plugin for Regent which provides support
for dataflow optimizations (most notably control replication). RDIR
support is opt-in because RDIR's license is different from that of
Regent (thus this prompt). Specifically:
* portions of RDIR are licensed under BSD
* other portions of RDIR are dual-licensed under BSD and Apache
(Regent itself is licensed entirely under Apache.)
You may choose to use RDIR automatically (select "auto" below),
manually, or not at all. Your preference will be saved. You can change
your mind at any time by re-running this script with the "--rdir"
parameter.
'''
def install_rdir(rdir, legion_dir, regent_dir):
    """Resolve the user's RDIR preference (prompting if needed).

    'auto' also initializes the RDIR git submodule; every answer except
    'skip' is persisted to .rdir.json for future runs.
    """
    config_filename = os.path.join(regent_dir, '.rdir.json')
    if rdir is None:
        rdir = load_json_config(config_filename)
        if rdir is None: rdir = 'prompt'
    if rdir == 'prompt':
        print(prompt_text)
        while rdir not in ['auto', 'manual', 'never']:
            rdir = _input('Enable RDIR? (auto/manual/never) ')
    assert rdir in ['auto', 'manual', 'skip', 'never']
    if rdir == 'auto':
        git_submodule_update(legion_dir)
    if rdir != 'skip':
        dump_json_config(config_filename, rdir)
def build_terra(terra_dir, terra_branch, use_cmake, cmake_exe, thread_count, llvm):
    """Build the in-tree Terra checkout, via CMake or plain make.

    When use_cmake is None the build system is auto-detected: CMake unless
    a previous Makefile build (build/main.o) exists without a CMake cache.
    """
    build_dir = os.path.join(terra_dir, 'build')
    release_dir = os.path.join(terra_dir, 'release')
    if use_cmake is None:
        build_detected = os.path.exists(os.path.join(build_dir, 'main.o'))
        cmake_detected = os.path.exists(os.path.join(build_dir, 'CMakeCache.txt'))
        use_cmake = cmake_detected or not build_detected
        if not use_cmake:
            print('Detected previous Makefile build in Terra, disabling Terra CMake build...')
    flags = []
    if llvm:
        # LLVM re-export only works with the Makefile build.
        assert not use_cmake, "LLVM mode not supported with Terra CMake build, see https://github.com/zdevito/terra/issues/394"
        flags.extend(['REEXPORT_LLVM_COMPONENTS=irreader mcjit x86'])
    if use_cmake:
        if not os.path.exists(os.path.join(build_dir, 'CMakeCache.txt')):
            subprocess.check_call(
                [cmake_exe, '..', '-DCMAKE_INSTALL_PREFIX=%s' % release_dir],
                cwd=build_dir)
        subprocess.check_call(
            [make_exe, 'install', '-j', str(thread_count)],
            cwd=build_dir)
    else:
        subprocess.check_call(
            [make_exe, 'all', '-j', str(thread_count)] + flags,
            cwd=terra_dir)
def install_terra(terra_dir, terra_url, terra_branch, use_cmake, cmake_exe,
                  external_terra_dir, thread_count, llvm):
    """Provision Terra at *terra_dir*: symlink an external installation,
    reuse an existing symlink, or clone/update and build from source.
    URL/branch may only be supplied on first clone.
    """
    if external_terra_dir is not None:
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch are incompatible with setting an external installation directory')
        external_terra_dir = os.path.expanduser(external_terra_dir)
        if not os.path.isdir(external_terra_dir):
            print('Error: No such directory %s' %
                  external_terra_dir)
            sys.exit(1)
        if os.path.lexists(terra_dir):
            # A real (non-symlink) checkout must not be clobbered.
            if not os.path.islink(terra_dir):
                print('Error: Attempting build with external Terra when internal Terra')
                print('already exists. Please remove the following directory to continue with')
                print('an external Terra installation.')
                print('  %s' % terra_dir)
                sys.exit(1)
            if os.path.realpath(terra_dir) != os.path.realpath(external_terra_dir):
                os.unlink(terra_dir)
                os.symlink(external_terra_dir, terra_dir)
        else:
            print(external_terra_dir, terra_dir)
            os.symlink(external_terra_dir, terra_dir)
        return
    elif os.path.islink(terra_dir):
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch are incompatible with setting an external installation directory')
        print('Reusing existing external Terra:')
        print('  %s' % os.path.realpath(terra_dir))
        print()
        return
    if not os.path.exists(terra_dir):
        if terra_url is None:
            terra_url = 'https://github.com/terralang/terra.git'
        if terra_branch is None:
            terra_branch = 'master'
        git_clone(terra_dir, terra_url, terra_branch)
    else:
        if terra_url is not None or terra_branch is not None:
            raise Exception('Terra URL/branch must be set on first install, please delete the terra directory and try again')
        git_update(terra_dir)
    build_terra(terra_dir, terra_branch, use_cmake, cmake_exe, thread_count, llvm)
def install_luarocks(terra_dir, luarocks_dir):
    """Clone, configure, and install LuaRocks against the Terra interpreter,
    then install the LDoc rock. (Currently disabled at the call site.)"""
    if not os.path.exists(luarocks_dir):
        # For now we need to use Git until the following patch makes
        # it into a release:
        # https://github.com/luarocks/luarocks/commit/708fed20d013e69fd79d80f0b59a45a25eed3a00
        luarocks_url = 'https://github.com/luarocks/luarocks.git'
        luarocks_branch = 'master'
        git_clone(luarocks_dir, luarocks_url, luarocks_branch)
    # Terra's layout differs between Makefile and CMake builds.
    if os.path.exists(os.path.join(terra_dir, 'bin', 'terra')):
        terra_prefix = os.path.join(terra_dir)
    elif os.path.exists(os.path.join(terra_dir, 'release', 'bin', 'terra')):
        terra_prefix = os.path.join(terra_dir, 'release')
    else:
        raise Exception('Unable to determine correct prefix for LuaRocks installation')
    luarocks_prefix = os.path.join(luarocks_dir, 'install')
    luarocks_exe = os.path.join(luarocks_prefix, 'bin', 'luarocks')
    if not os.path.exists(luarocks_exe):
        subprocess.check_call(
            [os.path.join(luarocks_dir, 'configure'),
             '--prefix=%s' % luarocks_prefix,
             '--with-lua=%s' % terra_prefix,
             '--with-lua-include=%s' % os.path.join(terra_prefix, 'include', 'terra'),
             '--with-lua-interpreter=terra'],
            cwd=luarocks_dir)
        # Hack: This throws an error but we'll keep going anyway...
        subprocess.call(['make'], cwd=luarocks_dir)
        subprocess.check_call(['make', 'install'], cwd=luarocks_dir)
    ldoc_exe = os.path.join(luarocks_prefix, 'bin', 'ldoc')
    if not os.path.exists(ldoc_exe):
        ldoc_url = 'https://raw.githubusercontent.com/StanfordLegion/LDoc/master/ldoc-scm-2.rockspec'
        subprocess.check_call([luarocks_exe, 'install', ldoc_url])
def symlink(from_path, to_path):
    """os.symlink that is a no-op when *to_path* already exists
    (even as a dangling link); idempotent."""
    if not os.path.lexists(to_path):
        os.symlink(from_path, to_path)
def install_bindings(regent_dir, legion_dir, bindings_dir, python_bindings_dir, runtime_dir,
                     cmake, cmake_exe, build_dir,
                     debug, cuda, openmp, python, llvm, hdf, spy,
                     gasnet, gasnet_dir, conduit, clean_first,
                     extra_flags, thread_count, verbose):
    """Build the Regent bindings (and optionally the Python bindings),
    either via a CMake build of the whole Legion tree or via plain make
    in the bindings directories; feature flags become -D options or make
    variables respectively.
    """
    # A pre-existing external build directory must never be cleaned.
    assert not (clean_first and build_dir is not None)
    if cmake:
        regent_build_dir = os.path.join(regent_dir, 'build')
        if build_dir is None:
            build_dir = regent_build_dir
        else:
            try:
                # check if the link is already there (and pointing at the right
                # thing) first
                if not os.path.islink(regent_build_dir) or (os.readlink(regent_build_dir) != build_dir):
                    os.symlink(build_dir, regent_build_dir)
            except OSError:
                print('Error: Attempting to build with an external build directory when an')
                print('internal (or different external) build directory already exists. Please')
                print('remove the following directory to continue with the installation:')
                print('  %s' % regent_build_dir)
                sys.exit(1)
        if clean_first:
            shutil.rmtree(build_dir)
        if not os.path.exists(build_dir):
            os.mkdir(build_dir)
        cc_flags = os.environ['CC_FLAGS'] if 'CC_FLAGS' in os.environ else ''
        flags = (
            ['-DCMAKE_BUILD_TYPE=%s' % ('Debug' if debug else 'Release'),
             '-DLegion_USE_CUDA=%s' % ('ON' if cuda else 'OFF'),
             '-DLegion_USE_OpenMP=%s' % ('ON' if openmp else 'OFF'),
             '-DLegion_USE_Python=%s' % ('ON' if python else 'OFF'),
             '-DLegion_USE_LLVM=%s' % ('ON' if llvm else 'OFF'),
             '-DLegion_USE_GASNet=%s' % ('ON' if gasnet else 'OFF'),
             '-DLegion_USE_HDF5=%s' % ('ON' if hdf else 'OFF'),
             '-DLegion_SPY=%s' % ('ON' if spy else 'OFF'),
             '-DLegion_BUILD_BINDINGS=ON',
             '-DBUILD_SHARED_LIBS=ON',
            ] +
            extra_flags +
            (['-DGASNet_ROOT_DIR=%s' % gasnet_dir] if gasnet_dir is not None else []) +
            (['-DGASNet_CONDUIT=%s' % conduit] if conduit is not None else []) +
            (['-DCMAKE_CXX_COMPILER=%s' % os.environ['CXX']] if 'CXX' in os.environ else []) +
            (['-DCMAKE_CXX_FLAGS=%s' % cc_flags] if cc_flags else []))
        if llvm:
            # mess with a few things so that Realm uses terra's LLVM
            flags.append('-DLegion_ALLOW_MISSING_LLVM_LIBS=ON')
            flags.append('-DLegion_LINK_LLVM_LIBS=OFF')
            if 'LLVM_CONFIG' in os.environ:
                flags.append('-DLLVM_CONFIG_EXECUTABLE=%s' % os.environ['LLVM_CONFIG'])
        make_flags = ['VERBOSE=1'] if verbose else []
        try:
            subprocess.check_output([cmake_exe, '--version'])
        except OSError:
            print('Error: CMake is not installed or otherwise not executable. Please check')
            print('your CMake installation and try again. You can use the --with-cmake flag')
            print('to specify the CMake executable if it is not on PATH.')
            print()
            print('Attempted to execute: %s' % cmake_exe)
            sys.exit(1)
        subprocess.check_call(
            [cmake_exe] + flags + [legion_dir],
            cwd=build_dir)
        subprocess.check_call(
            [make_exe] + make_flags + ['-j', str(thread_count)],
            cwd=build_dir)
    else:
        flags = (
            ['LG_RT_DIR=%s' % runtime_dir,
             'DEFINE_HEADERS_DIR=%s' % bindings_dir,
             'DEBUG=%s' % (1 if debug else 0),
             'USE_CUDA=%s' % (1 if cuda else 0),
             'USE_OPENMP=%s' % (1 if openmp else 0),
             'USE_PYTHON=%s' % (1 if python else 0),
             'USE_LLVM=%s' % (1 if llvm else 0),
             'USE_GASNET=%s' % (1 if gasnet else 0),
             'USE_HDF=%s' % (1 if hdf else 0),
             'USE_SPY=%s' % (1 if spy else 0),
            ] +
            extra_flags +
            (['GASNET=%s' % gasnet_dir] if gasnet_dir is not None else []) +
            (['CONDUIT=%s' % conduit] if conduit is not None else []) +
            (['GCC=%s' % os.environ['CXX']] if 'CXX' in os.environ else []))
        if clean_first:
            subprocess.check_call(
                [make_exe] + flags + ['clean'],
                cwd=bindings_dir)
            if python:
                subprocess.check_call(
                    [make_exe] + flags + ['clean'],
                    cwd=python_bindings_dir)
        subprocess.check_call(
            [make_exe] + flags + ['-j', str(thread_count)],
            cwd=bindings_dir)
        if python:
            subprocess.check_call(
                [make_exe] + flags + ['-j', str(thread_count)],
                cwd=python_bindings_dir)
    # macOS dylibs embed absolute paths; rewrite the luajit reference so it
    # is found via the search path instead.
    if os_name == 'Darwin':
        subprocess.check_call(
            ['install_name_tool', '-change',
             '/usr/local/lib/libluajit-5.1.2.dylib', 'libluajit-5.1.2.dylib',
             os.path.join(bindings_dir, 'libregent.dylib')])
def get_cmake_config(cmake, regent_dir, default=None):
    """Resolve whether to build with CMake, persisting the choice.

    When *cmake* is None, falls back to the cached value stored in
    ``<regent_dir>/.cmake.json``; when no cached value exists either,
    falls back to *default*. The resolved boolean is written back to
    the cache file so later runs reuse the same choice.
    """
    cache_path = os.path.join(regent_dir, '.cmake.json')
    resolved = cmake
    if resolved is None:
        resolved = load_json_config(cache_path)
    if resolved is None:
        resolved = default
    # Must end up a strict boolean; anything else is a caller/cache bug.
    assert resolved in [True, False]
    dump_json_config(cache_path, resolved)
    return resolved
def install(gasnet=False, cuda=False, openmp=False, python=False, llvm=False, hdf=False,
            spy=False, conduit=None, cmake=None, rdir=None,
            cmake_exe=None, cmake_build_dir=None,
            terra_url=None, terra_branch=None, terra_use_cmake=None, external_terra_dir=None,
            gasnet_dir=None, debug=False, clean_first=True, extra_flags=None,
            thread_count=None, verbose=False):
    """Install the Regent front end.

    Installs the RDIR compiler plugin, a Terra toolchain, and the Legion
    Regent (and optionally Python) bindings, building either with CMake
    or GNU Make depending on the resolved ``cmake`` setting.

    Raises:
        Exception: if ``cmake_build_dir`` is given without CMake, or
            together with ``clean_first`` (a pre-existing build directory
            cannot be cleaned).
    """
    # Fix: the original used a mutable default argument (extra_flags=[]),
    # which is shared across calls; use None as the sentinel instead.
    if extra_flags is None:
        extra_flags = []
    regent_dir = os.path.dirname(os.path.realpath(__file__))
    legion_dir = os.path.dirname(regent_dir)
    # Resolve (and persist) whether to build with CMake.
    cmake = get_cmake_config(cmake, regent_dir, default=False)
    if clean_first is None:
        # Make-based builds default to cleaning; CMake builds stay incremental.
        clean_first = not cmake
    if not cmake and cmake_build_dir is not None:
        raise Exception('Build directory is only permitted when building with CMake')
    if clean_first and cmake_build_dir is not None:
        raise Exception('Cannot clean a pre-existing build directory')
    if thread_count is None:
        thread_count = multiprocessing.cpu_count()
    # LG_RT_DIR in the environment overrides the in-tree runtime location.
    runtime_dir = os.path.join(legion_dir, 'runtime')
    if 'LG_RT_DIR' in os.environ:
        runtime_dir = os.path.realpath(os.environ['LG_RT_DIR'])
    install_rdir(rdir, legion_dir, regent_dir)
    terra_dir = os.path.join(regent_dir, 'terra')
    install_terra(terra_dir, terra_url, terra_branch, terra_use_cmake, cmake_exe,
                  external_terra_dir, thread_count, llvm)
    bindings_dir = os.path.join(legion_dir, 'bindings', 'regent')
    python_bindings_dir = os.path.join(legion_dir, 'bindings', 'python')
    install_bindings(regent_dir, legion_dir, bindings_dir, python_bindings_dir, runtime_dir,
                     cmake, cmake_exe, cmake_build_dir,
                     debug, cuda, openmp, python, llvm, hdf, spy,
                     gasnet, gasnet_dir, conduit, clean_first,
                     extra_flags, thread_count, verbose)
def driver():
    """Command-line entry point: parse install options and call install().

    Each argument's ``dest`` matches a keyword parameter of install(), so
    the parsed namespace can be forwarded directly via install(**vars(args)).
    """
    parser = argparse.ArgumentParser(
        description='Install Regent front end.')
    # Terra toolchain selection.
    parser.add_argument(
        '--terra-url', dest='terra_url', metavar='URL', required=False,
        help='URL to Terra repository to clone (optional).')
    parser.add_argument(
        '--terra-branch', dest='terra_branch', metavar='BRANCH', required=False,
        help='Name of Terra branch to clone (optional).')
    # Tri-state flag: None means "use the cached/default choice".
    parser.add_argument(
        '--terra-cmake', dest='terra_use_cmake', action='store_true', required=False,
        default=None,
        help='Build Terra with CMake.')
    parser.add_argument(
        '--no-terra-cmake', dest='terra_use_cmake', action='store_false', required=False,
        default=None,
        help="Don't build Terra with CMake (instead use GNU Make).")
    parser.add_argument(
        '--with-terra', dest='external_terra_dir', metavar='DIR', required=False,
        help='Path to Terra installation directory (optional).')
    # Legion build feature toggles; environment variables provide defaults
    # so the script composes with Makefile-style configuration.
    parser.add_argument(
        '--debug', dest='debug', action='store_true', required=False,
        default=os.environ.get('DEBUG') == '1',
        help='Build Legion with debugging enabled.')
    parser.add_argument(
        '--gasnet', dest='gasnet', action='store_true', required=False,
        default=os.environ.get('USE_GASNET') == '1',
        help='Build Legion with GASNet.')
    parser.add_argument(
        '--with-gasnet', dest='gasnet_dir', metavar='DIR', required=False,
        default=os.environ.get('GASNET'),
        help='Path to GASNet installation directory.')
    parser.add_argument(
        '--cuda', dest='cuda', action='store_true', required=False,
        default=os.environ.get('USE_CUDA') == '1',
        help='Build Legion with CUDA.')
    parser.add_argument(
        '--openmp', dest='openmp', action='store_true', required=False,
        default=os.environ.get('USE_OPENMP') == '1',
        help='Build Legion with OpenMP support.')
    parser.add_argument(
        '--python', dest='python', action='store_true', required=False,
        default=os.environ.get('USE_PYTHON') == '1',
        help='Build Legion with Python support.')
    parser.add_argument(
        '--llvm', dest='llvm', action='store_true', required=False,
        default=os.environ.get('USE_LLVM') == '1',
        help='Build Legion (and compatible Terra) with LLVM support.')
    parser.add_argument(
        '--hdf5', '--hdf', dest='hdf', action='store_true', required=False,
        default=os.environ.get('USE_HDF') == '1',
        help='Build Legion with HDF.')
    parser.add_argument(
        '--spy', dest='spy', action='store_true', required=False,
        default=os.environ.get('USE_SPY') == '1',
        help='Build Legion with detailed Legion Spy enabled.')
    parser.add_argument(
        '--conduit', dest='conduit', action='store', required=False,
        default=os.environ.get('CONDUIT'),
        help='Build Legion with specified GASNet conduit.')
    # Tri-state: True/False when USE_CMAKE is set, None to use the cached choice.
    parser.add_argument(
        '--cmake', dest='cmake', action='store_true', required=False,
        default=os.environ['USE_CMAKE'] == '1' if 'USE_CMAKE' in os.environ else None,
        help='Build Legion with CMake.')
    parser.add_argument(
        '--no-cmake', dest='cmake', action='store_false', required=False,
        help="Don't build Legion with CMake (instead use GNU Make).")
    parser.add_argument(
        '--with-cmake', dest='cmake_exe', metavar='EXE', required=False,
        default='cmake',
        help='Path to CMake executable (if not on PATH).')
    parser.add_argument(
        '--with-cmake-build', dest='cmake_build_dir', metavar='DIR', required=False,
        help='Path to CMake build directory (optional).')
    parser.add_argument(
        '--rdir', dest='rdir', required=False,
        choices=['prompt', 'auto', 'manual', 'skip', 'never'], default=None,
        help='Enable RDIR compiler plugin.')
    # Tri-state: default None lets install() decide based on the build system.
    parser.add_argument(
        '--clean', dest='clean_first', action='store_true', required=False,
        default=None,
        help='Clean before build.')
    parser.add_argument(
        '--no-clean', '--noclean', dest='clean_first', action='store_false', required=False,
        help='Skip clean before build.')
    parser.add_argument(
        '--extra', dest='extra_flags', action='append', required=False,
        default=[],
        help='Extra flags for make command.')
    parser.add_argument(
        '-j', dest='thread_count', nargs='?', type=int,
        help='Number threads used to compile.')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', required=False,
        help='Enable verbose build output.')
    args = parser.parse_args()
    # Forward all parsed options directly as install() keyword arguments.
    install(**vars(args))
if __name__ == '__main__':
    driver()
| true | true |
1c32ea71e1a82052c0748ab501146930d51a83c6 | 820 | py | Python | city_scrapers/spiders/cuya_archives_advisory.py | PaulR-Docs/city-scrapers-cle | 00f5db64b64e1ea0ddfa3b7f53d1f5db26c1042d | [
"MIT"
] | 14 | 2019-10-18T15:33:43.000Z | 2021-12-17T17:15:36.000Z | city_scrapers/spiders/cuya_archives_advisory.py | PaulR-Docs/city-scrapers-cle | 00f5db64b64e1ea0ddfa3b7f53d1f5db26c1042d | [
"MIT"
] | 35 | 2020-01-16T15:56:46.000Z | 2022-03-19T18:41:42.000Z | city_scrapers/spiders/cuya_archives_advisory.py | PaulR-Docs/city-scrapers-cle | 00f5db64b64e1ea0ddfa3b7f53d1f5db26c1042d | [
"MIT"
] | 13 | 2019-09-16T18:44:45.000Z | 2022-02-22T20:59:26.000Z | from city_scrapers_core.constants import ADVISORY_COMMITTEE
from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins import CuyaCountyMixin
class CuyaArchivesAdvisorySpider(CuyaCountyMixin, CityScrapersSpider):
    """Scraper for Cuyahoga County Archives Advisory Commission meetings."""

    name = "cuya_archives_advisory"
    agency = "Cuyahoga County Archives Advisory Commission"
    start_urls = ["http://bc.cuyahogacounty.us/en-US/Archives-Advisory-Commission.aspx"]
    classification = ADVISORY_COMMITTEE
    # Canonical venue returned whenever the scraped address matches the
    # archives building on Perkins Ave.
    location = {
        "name": "Cuyahoga County Archives Building, 3rd Floor",
        "address": "3951 Perkins Ave Cleveland, OH 44114",
    }

    def _parse_location(self, response):
        """Normalize the scraped address: substitute the known archives
        building location when it mentions "Perkins", otherwise pass the
        raw address string through with an empty venue name."""
        address_text = super()._parse_location(response)
        if "Perkins" not in address_text:
            return {"name": "", "address": address_text}
        return self.location
| 37.272727 | 88 | 0.729268 | from city_scrapers_core.constants import ADVISORY_COMMITTEE
from city_scrapers_core.spiders import CityScrapersSpider
from city_scrapers.mixins import CuyaCountyMixin
class CuyaArchivesAdvisorySpider(CuyaCountyMixin, CityScrapersSpider):
name = "cuya_archives_advisory"
agency = "Cuyahoga County Archives Advisory Commission"
start_urls = ["http://bc.cuyahogacounty.us/en-US/Archives-Advisory-Commission.aspx"]
classification = ADVISORY_COMMITTEE
location = {
"name": "Cuyahoga County Archives Building, 3rd Floor",
"address": "3951 Perkins Ave Cleveland, OH 44114",
}
def _parse_location(self, response):
loc_str = super()._parse_location(response)
if "Perkins" in loc_str:
return self.location
return {"name": "", "address": loc_str}
| true | true |
1c32ec154452a68a67e0bffdbf3555ccbf090d30 | 12,694 | py | Python | src/bin/feature_extract.py | patrickltobing/shallow-wavenet | a7348805825e47a24e3ad0e759cecfe85284ba9f | [
"Apache-2.0"
] | 19 | 2020-02-28T06:25:26.000Z | 2021-04-22T06:28:16.000Z | src/bin/feature_extract.py | patrickltobing/shallow-wavenet | a7348805825e47a24e3ad0e759cecfe85284ba9f | [
"Apache-2.0"
] | null | null | null | src/bin/feature_extract.py | patrickltobing/shallow-wavenet | a7348805825e47a24e3ad0e759cecfe85284ba9f | [
"Apache-2.0"
] | 1 | 2020-05-03T10:54:59.000Z | 2020-05-03T10:54:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Patrick Lumban Tobing (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import logging
import numpy as np
from numpy.matlib import repmat
from scipy.interpolate import interp1d
#from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import lfilter
from utils import find_files
from utils import read_txt
from utils import write_hdf5, read_hdf5
from multiprocessing import Array
import pysptk as ps
import pyworld as pw
#import librosa
import soundfile as sf
# Print full arrays when logging (useful when inspecting feature dumps).
np.set_printoptions(threshold=np.inf)
# Default analysis settings; all are overridable on the command line.
# Fix: the original assigned FS and MCEP_ALPHA twice, leaving the first
# value dead; only the effective values are kept here.
FS = 24000             # sampling frequency [Hz] (other configs used 22050/44100/48000)
SHIFTMS = 5.0          # frame shift [ms]
MINF0 = 40             # F0 search floor [Hz]
MAXF0 = 700            # F0 search ceiling [Hz]
MCEP_DIM = 49          # mel-cepstrum order (a 34-order variant was used previously)
MCEP_ALPHA = 0.466     # frequency-warping alpha; appears paired with FS=24000
                       # (0.455/0.544/0.554 paired with the other rates) -- TODO confirm
FFTL = 1024            # FFT length for spectral analysis
LOWPASS_CUTOFF = 20    # cutoff [Hz] for smoothing the continuous F0 contour
HIGHPASS_CUTOFF = 70   # cutoff [Hz] for removing low-frequency noise from input audio
OVERWRITE = True
def low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):
    """Apply a high-pass (low-cut) FIR filter to a waveform.

    Args:
        x (ndarray): waveform sequence
        fs (int): sampling frequency in Hz
        cutoff (float): cutoff frequency in Hz

    Returns:
        ndarray: high-pass filtered waveform sequence
    """
    # Normalize the cutoff relative to the Nyquist frequency.
    normalized_cutoff = cutoff / (fs // 2)
    # 255-tap FIR; pass_zero=False makes it a high-pass design.
    taps = firwin(255, normalized_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
def analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """Run WORLD analysis (F0, spectral envelope, aperiodicity) on a waveform.

    F0 is extracted with a fixed 60 Hz floor and no explicit ceiling; pass
    precomputed ``f0``/``time_axis`` to skip extraction.  NOTE(review):
    ``minf0``/``maxf0`` are accepted only for signature compatibility with
    analyze_range() and are not used here.

    Args:
        wav (ndarray): waveform samples (assumes float64 as WORLD expects -- TODO confirm)
        fs (int): sampling frequency in Hz
        fperiod (float): frame period in ms
        fftl (int): FFT length for spectral analysis
        f0 (ndarray): optional precomputed F0 contour
        time_axis (ndarray): optional precomputed frame times

    Returns:
        tuple: (time_axis, f0, spectral envelope, aperiodicity)
    """
    if f0 is None or time_axis is None:
        # Harvest gives a coarse contour; StoneMask refines it per frame.
        _f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)
        f0 = pw.stonemask(wav, _f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """Run WORLD analysis with an explicit F0 search range.

    Like analyze(), but Harvest searches F0 within [minf0, maxf0] instead of
    using a fixed floor.  Pass precomputed ``f0``/``time_axis`` to skip
    extraction.

    Args:
        wav (ndarray): waveform samples (assumes float64 as WORLD expects -- TODO confirm)
        fs (int): sampling frequency in Hz
        minf0 (float): F0 search floor in Hz
        maxf0 (float): F0 search ceiling in Hz
        fperiod (float): frame period in ms
        fftl (int): FFT length for spectral analysis
        f0 (ndarray): optional precomputed F0 contour
        time_axis (ndarray): optional precomputed frame times

    Returns:
        tuple: (time_axis, f0, spectral envelope, aperiodicity)
    """
    if f0 is None or time_axis is None:
        # Harvest gives a coarse contour; StoneMask refines it per frame.
        _f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)
        f0 = pw.stonemask(wav, _f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):
    """Load a wav file and optionally high-pass filter it.

    Args:
        wav_file (str): path to the wav file
        cutoff (int): high-pass cutoff in Hz; 0 disables filtering

    Returns:
        tuple: (sampling frequency, waveform samples)
    """
    # soundfile returns float samples and the file's sampling rate.
    x, fs = sf.read(wav_file)
    if cutoff != 0:
        x = low_cut_filter(x, fs, cutoff)
    return fs, x
def low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):
    """Apply a low-pass FIR filter with edge padding.

    Args:
        x (ndarray): input sequence
        fs (int): sampling frequency in Hz
        cutoff (float): cutoff frequency in Hz
        padding (bool): unused; kept only for interface compatibility

    Returns:
        ndarray: low-pass filtered sequence, same length as ``x``
    """
    numtaps = 255
    # Normalize the cutoff relative to the Nyquist frequency.
    taps = firwin(numtaps, cutoff / (fs // 2))
    # Pad with edge values to suppress boundary transients, filter, then
    # slice out the region that compensates for the FIR group delay
    # (numtaps // 2 samples), restoring the original length.
    padded = np.pad(x, (numtaps, numtaps), 'edge')
    filtered = lfilter(taps, 1, padded)
    return filtered[numtaps + numtaps // 2: -numtaps // 2]
def convert_continuos_f0(f0):
    """Convert an F0 contour with unvoiced (zero) frames to a continuous one.

    Unvoiced gaps are filled by linear interpolation between neighboring
    voiced frames; leading/trailing unvoiced frames take the first/last
    voiced value.  Unlike the previous implementation, the input array is
    not modified, and an entirely unvoiced input no longer raises IndexError.

    Args:
        f0 (ndarray): F0 sequence with zeros at unvoiced frames, shape (T,)

    Returns:
        tuple: (uv, cont_f0) where ``uv`` is a float32 voiced/unvoiced mask
            and ``cont_f0`` is the gap-filled F0 sequence, both shape (T,)
    """
    # Voiced/unvoiced mask as binary floats (computed before any edits).
    uv = np.float32(f0 != 0)
    voiced = np.where(f0 != 0)[0]
    if len(voiced) == 0:
        # Entirely unvoiced input: nothing to interpolate from.
        return uv, np.array(f0)
    # Work on a copy so the caller's array is left untouched.
    f0 = np.array(f0)
    # Extend the first/last voiced values out to the sequence edges.
    f0[:voiced[0]] = f0[voiced[0]]
    f0[voiced[-1]:] = f0[voiced[-1]]
    # Linearly interpolate across the remaining unvoiced gaps.
    nz_frames = np.where(f0 != 0)[0]
    interp_fn = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = interp_fn(np.arange(0, f0.shape[0]))
    return uv, cont_f0
def main():
    """Extract WORLD features from wav files in parallel worker processes.

    Each worker computes F0/spectrum/aperiodicity features, writes them to
    per-utterance HDF5 files, and (unless --init) also writes high-pass
    filtered and analysis-synthesis wav files for inspection.
    """
    parser = argparse.ArgumentParser(
        description="making feature file argsurations.")
    parser.add_argument("--expdir", required=True,
                        type=str, help="directory to save the log")
    parser.add_argument(
        "--waveforms", default=None,
        help="directory or list of filename of input wavfile")
    parser.add_argument(
        "--hdf5dir", default=None,
        help="directory to save hdf5")
    parser.add_argument(
        "--wavdir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--wavanasyndir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--fs", default=FS,
        type=int, help="Sampling frequency")
    parser.add_argument(
        "--shiftms", default=SHIFTMS,
        type=float, help="Frame shift in msec")
    parser.add_argument(
        "--minf0", default=MINF0,
        type=int, help="minimum f0")
    parser.add_argument(
        "--maxf0", default=MAXF0,
        type=int, help="maximum f0")
    parser.add_argument(
        "--mcep_dim", default=MCEP_DIM,
        type=int, help="Dimension of mel cepstrum")
    parser.add_argument(
        "--mcep_alpha", default=MCEP_ALPHA,
        type=float, help="Alpha of mel cepstrum")
    parser.add_argument(
        "--fftl", default=FFTL,
        type=int, help="FFT length")
    parser.add_argument(
        "--fftl_ns", default=None,
        type=int, help="FFT length for noise shaped waveforms")
    parser.add_argument(
        "--highpass_cutoff", default=HIGHPASS_CUTOFF,
        type=int, help="Cut off frequency in lowpass filter")
    parser.add_argument("--init", default=False,
                        type=strtobool, help="flag for computing stats of dtw-ed feature")
    parser.add_argument(
        "--n_jobs", default=10,
        type=int, help="number of parallel jobs")
    parser.add_argument(
        "--verbose", default=1,
        type=int, help="log message level")
    args = parser.parse_args()
    # set log level (1: INFO, >1: DEBUG, otherwise WARN)
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    else:
        logging.basicConfig(level=logging.WARN,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
        logging.warn("logging is disabled.")
    # read list of input wav files (directory glob or a text file listing)
    if os.path.isdir(args.waveforms):
        file_list = sorted(find_files(args.waveforms, "*.wav"))
    else:
        file_list = read_txt(args.waveforms)
    # check directory existence
    if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):
        os.makedirs(args.wavdir)
    if (args.wavanasyndir is not None) and (not os.path.exists(args.wavanasyndir)):
        os.makedirs(args.wavanasyndir)
    if not os.path.exists(args.hdf5dir):
        os.makedirs(args.hdf5dir)
    def feature_extract(wav_list, arr):
        """Worker: extract features for wav_list, accumulating counts in arr.

        arr is a shared multiprocessing.Array of 3 doubles:
        [total wavs, total samples, total frames].
        NOTE(review): updates to arr are not lock-guarded; Array's default
        lock only protects single element accesses -- confirm acceptable.
        """
        n_wav = len(wav_list)
        n_sample = 0
        n_frame = 0
        count = 1
        max_frame = 0
        for wav_name in wav_list:
            # load wavfile and highpass-filter
            fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)
            n_sample += x.shape[0]
            logging.info(wav_name+" "+str(x.shape[0])+" "+str(n_sample)+" "+str(count))
            # check sampling frequency
            if not fs == args.fs:
                logging.debug("ERROR: sampling frequency is not matched.")
                sys.exit(1)
            hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
            logging.info(hdf5name)
            if not args.init:
                _, f0, spc, ap = analyze_range(x, fs=fs, minf0=args.minf0, maxf0=args.maxf0, \
                                    fperiod=args.shiftms, fftl=args.fftl)
                # concatenate U/V flag, log continuous F0, coded aperiodicity
                # and mel-cepstrum into one frame-level feature matrix
                uv, cont_f0 = convert_continuos_f0(np.array(f0))
                cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
                codeap = pw.code_aperiodicity(ap, fs)
                #logging.info(codeap)
                logging.info(codeap.shape)
                mcep = ps.sp2mc(spc, args.mcep_dim, args.mcep_alpha)
                cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
                uv = np.expand_dims(uv, axis=-1)
                log_contf0_lpf = np.log(cont_f0_lpf)
                feats_lf0 = np.concatenate([uv, log_contf0_lpf, codeap, mcep], axis=1)
                logging.info(feats_lf0.shape)
                write_hdf5(hdf5name, "/feat_org_lf0", feats_lf0)
                n_frame += feats_lf0.shape[0]
                if max_frame < feats_lf0.shape[0]:
                    max_frame = feats_lf0.shape[0]
                # overwrite wav file with the high-pass filtered version
                if args.highpass_cutoff != 0:
                    #wavfile.write(args.wavdir + "/" + os.path.basename(wav_name), fs, np.int16(x))
                    sf.write(args.wavdir + "/" + os.path.basename(wav_name), x, fs, 'PCM_16')
                # write WORLD analysis-synthesis wav for quality inspection
                wavpath = args.wavanasyndir + "/" + os.path.basename(wav_name)
                logging.info(wavpath)
                sp_rec = ps.mc2sp(mcep, args.mcep_alpha, args.fftl)
                #wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -32768, 32767)
                wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -1, 1)
                #wavfile.write(wavpath, fs, np.int16(wav))
                sf.write(wavpath, wav, fs, 'PCM_16')
            else:
                # --init: only extract raw F0 (used for F0-range statistics)
                _, f0, _, _ = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
                write_hdf5(hdf5name, "/f0", f0)
                n_frame += f0.shape[0]
                if max_frame < f0.shape[0]:
                    max_frame = f0.shape[0]
            count += 1
        arr[0] += n_wav
        arr[1] += n_sample
        arr[2] += n_frame
        if (n_wav > 0):
            logging.info(str(arr[0])+" "+str(n_wav)+" "+str(arr[1])+" "+str(n_sample/n_wav)+" "+str(arr[2])\
                        +" "+str(n_frame/n_wav)+" max_frame = "+str(max_frame))
    # divide the file list evenly across the worker processes
    file_lists = np.array_split(file_list, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]
    # multi processing
    processes = []
    arr = mp.Array('d', 3)
    #logging.info(arr[:])
    for f in file_lists:
        p = mp.Process(target=feature_extract, args=(f,arr))
        p.start()
        processes.append(p)
    # wait for all processes to finish
    for p in processes:
        p.join()
    logging.info(str(arr[0])+" "+str(arr[1])+" "+str(arr[1]/arr[0])+" "+str(arr[2])+" "+str(arr[2]/arr[0]))
if __name__ == "__main__":
    main()
| 34.778082 | 109 | 0.606507 |
from __future__ import division
from __future__ import print_function
import argparse
import multiprocessing as mp
import os
import sys
from distutils.util import strtobool
import logging
import numpy as np
from numpy.matlib import repmat
from scipy.interpolate import interp1d
from scipy.signal import firwin
from scipy.signal import lfilter
from utils import find_files
from utils import read_txt
from utils import write_hdf5, read_hdf5
from multiprocessing import Array
import pysptk as ps
import pyworld as pw
import soundfile as sf
# Print full arrays when logging (useful when inspecting feature dumps).
np.set_printoptions(threshold=np.inf)
# Default analysis settings; all are overridable on the command line.
# NOTE(review): FS and MCEP_ALPHA are assigned twice -- the first value of
# each pair is dead and only the second takes effect.
FS = 22050
FS = 24000             # sampling frequency [Hz]
SHIFTMS = 5.0          # frame shift [ms]
MINF0 = 40             # F0 search floor [Hz]
MAXF0 = 700            # F0 search ceiling [Hz]
MCEP_DIM = 49          # mel-cepstrum order
MCEP_ALPHA = 0.455
MCEP_ALPHA = 0.466     # frequency-warping alpha (effective value)
FFTL = 1024            # FFT length for spectral analysis
LOWPASS_CUTOFF = 20    # cutoff [Hz] for smoothing the continuous F0 contour
HIGHPASS_CUTOFF = 70   # cutoff [Hz] for removing low-frequency noise from input audio
OVERWRITE = True
def low_cut_filter(x, fs, cutoff=HIGHPASS_CUTOFF):
    """Apply a high-pass (low-cut) 255-tap FIR filter to waveform ``x``.

    ``cutoff`` is in Hz and normalized against the Nyquist frequency.
    Returns the filtered waveform sequence.
    """
    nyquist = fs // 2
    norm_cutoff = cutoff / nyquist
    # pass_zero=False makes firwin produce a high-pass design.
    fil = firwin(255, norm_cutoff, pass_zero=False)
    lcf_x = lfilter(fil, 1, x)
    return lcf_x
def analyze(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis with a fixed 60 Hz F0 floor.

    Returns (time_axis, f0, spectral envelope, aperiodicity).  Pass
    precomputed ``f0``/``time_axis`` to skip F0 extraction.
    NOTE(review): ``minf0``/``maxf0`` are accepted but unused here; only
    analyze_range() applies them.
    """
    if f0 is None or time_axis is None:
        # Harvest gives a coarse contour; StoneMask refines it per frame.
        _f0, time_axis = pw.harvest(wav, fs, f0_floor=60.0, frame_period=fperiod)
        f0 = pw.stonemask(wav, _f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def analyze_range(wav, fs=FS, minf0=MINF0, maxf0=MAXF0, fperiod=SHIFTMS, fftl=FFTL, f0=None, time_axis=None):
    """WORLD analysis searching F0 within [minf0, maxf0].

    Returns (time_axis, f0, spectral envelope, aperiodicity).  Pass
    precomputed ``f0``/``time_axis`` to skip F0 extraction.
    """
    if f0 is None or time_axis is None:
        # Harvest gives a coarse contour; StoneMask refines it per frame.
        _f0, time_axis = pw.harvest(wav, fs, f0_floor=minf0, f0_ceil=maxf0, frame_period=fperiod)
        f0 = pw.stonemask(wav, _f0, time_axis, fs)
    sp = pw.cheaptrick(wav, f0, time_axis, fs, fft_size=fftl)
    ap = pw.d4c(wav, f0, time_axis, fs, fft_size=fftl)
    return time_axis, f0, sp, ap
def read_wav(wav_file, cutoff=HIGHPASS_CUTOFF):
    """Load a wav file and optionally high-pass filter it.

    ``cutoff`` is in Hz; 0 disables filtering.  Returns (fs, samples).
    """
    x, fs = sf.read(wav_file)
    if cutoff != 0:
        x = low_cut_filter(x, fs, cutoff)
    return fs, x
def low_pass_filter(x, fs, cutoff=LOWPASS_CUTOFF, padding=True):
    """Apply a low-pass 255-tap FIR filter with edge padding.

    Returns a sequence of the same length as ``x``.
    NOTE(review): the ``padding`` parameter is never used in the body.
    """
    nyquist = fs // 2
    norm_cutoff = cutoff / nyquist
    numtaps = 255
    fil = firwin(numtaps, norm_cutoff)
    # Pad with edge values to suppress boundary transients.
    x_pad = np.pad(x, (numtaps, numtaps), 'edge')
    lpf_x = lfilter(fil, 1, x_pad)
    # Slice compensates for the FIR group delay (numtaps // 2 samples).
    lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2]
    return lpf_x
def convert_continuos_f0(f0):
    """Convert an F0 contour with unvoiced (zero) frames to a continuous one.

    Returns (uv, cont_f0): a float32 voiced/unvoiced mask and the
    gap-filled F0 sequence, both shape (T,).
    NOTE(review): mutates the input array in place and raises IndexError on
    an all-zero (entirely unvoiced) input -- callers pass a fresh copy.
    """
    # voiced/unvoiced mask as binary floats (before any in-place edits)
    uv = np.float32(f0 != 0)
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]
    # Extend the first/last voiced values out to the sequence edges.
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0
    # Linearly interpolate across the remaining unvoiced gaps.
    nz_frames = np.where(f0 != 0)[0]
    f = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = f(np.arange(0, f0.shape[0]))
    return uv, cont_f0
def main():
    """Extract WORLD features from wav files in parallel worker processes.

    Each worker computes F0/spectrum/aperiodicity features, writes them to
    per-utterance HDF5 files, and (unless --init) also writes high-pass
    filtered and analysis-synthesis wav files for inspection.
    """
    parser = argparse.ArgumentParser(
        description="making feature file argsurations.")
    parser.add_argument("--expdir", required=True,
                        type=str, help="directory to save the log")
    parser.add_argument(
        "--waveforms", default=None,
        help="directory or list of filename of input wavfile")
    parser.add_argument(
        "--hdf5dir", default=None,
        help="directory to save hdf5")
    parser.add_argument(
        "--wavdir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--wavanasyndir", default=None,
        help="directory to save of preprocessed wav file")
    parser.add_argument(
        "--fs", default=FS,
        type=int, help="Sampling frequency")
    parser.add_argument(
        "--shiftms", default=SHIFTMS,
        type=float, help="Frame shift in msec")
    parser.add_argument(
        "--minf0", default=MINF0,
        type=int, help="minimum f0")
    parser.add_argument(
        "--maxf0", default=MAXF0,
        type=int, help="maximum f0")
    parser.add_argument(
        "--mcep_dim", default=MCEP_DIM,
        type=int, help="Dimension of mel cepstrum")
    parser.add_argument(
        "--mcep_alpha", default=MCEP_ALPHA,
        type=float, help="Alpha of mel cepstrum")
    parser.add_argument(
        "--fftl", default=FFTL,
        type=int, help="FFT length")
    parser.add_argument(
        "--fftl_ns", default=None,
        type=int, help="FFT length for noise shaped waveforms")
    parser.add_argument(
        "--highpass_cutoff", default=HIGHPASS_CUTOFF,
        type=int, help="Cut off frequency in lowpass filter")
    parser.add_argument("--init", default=False,
                        type=strtobool, help="flag for computing stats of dtw-ed feature")
    parser.add_argument(
        "--n_jobs", default=10,
        type=int, help="number of parallel jobs")
    parser.add_argument(
        "--verbose", default=1,
        type=int, help="log message level")
    args = parser.parse_args()
    # set log level (1: INFO, >1: DEBUG, otherwise WARN)
    if args.verbose == 1:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    elif args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
    else:
        logging.basicConfig(level=logging.WARN,
                            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
                            datefmt='%m/%d/%Y %I:%M:%S',
                            filename=args.expdir + "/feature_extract.log")
        logging.getLogger().addHandler(logging.StreamHandler())
        logging.warn("logging is disabled.")
    # read list of input wav files (directory glob or a text file listing)
    if os.path.isdir(args.waveforms):
        file_list = sorted(find_files(args.waveforms, "*.wav"))
    else:
        file_list = read_txt(args.waveforms)
    # create output directories as needed
    if (args.wavdir is not None) and (not os.path.exists(args.wavdir)):
        os.makedirs(args.wavdir)
    if (args.wavanasyndir is not None) and (not os.path.exists(args.wavanasyndir)):
        os.makedirs(args.wavanasyndir)
    if not os.path.exists(args.hdf5dir):
        os.makedirs(args.hdf5dir)
    def feature_extract(wav_list, arr):
        """Worker: extract features for wav_list, accumulating counts in arr
        (shared Array of 3 doubles: total wavs, samples, frames)."""
        n_wav = len(wav_list)
        n_sample = 0
        n_frame = 0
        count = 1
        max_frame = 0
        for wav_name in wav_list:
            # load wavfile and highpass-filter
            fs, x = read_wav(wav_name, cutoff=args.highpass_cutoff)
            n_sample += x.shape[0]
            logging.info(wav_name+" "+str(x.shape[0])+" "+str(n_sample)+" "+str(count))
            # abort if the file's sampling frequency does not match --fs
            if not fs == args.fs:
                logging.debug("ERROR: sampling frequency is not matched.")
                sys.exit(1)
            hdf5name = args.hdf5dir + "/" + os.path.basename(wav_name).replace(".wav", ".h5")
            logging.info(hdf5name)
            if not args.init:
                _, f0, spc, ap = analyze_range(x, fs=fs, minf0=args.minf0, maxf0=args.maxf0, \
                                    fperiod=args.shiftms, fftl=args.fftl)
                # build the frame-level feature matrix: U/V flag,
                # log continuous F0, coded aperiodicity, mel-cepstrum
                uv, cont_f0 = convert_continuos_f0(np.array(f0))
                cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (args.shiftms * 0.001)), cutoff=20)
                codeap = pw.code_aperiodicity(ap, fs)
                logging.info(codeap.shape)
                mcep = ps.sp2mc(spc, args.mcep_dim, args.mcep_alpha)
                cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
                uv = np.expand_dims(uv, axis=-1)
                log_contf0_lpf = np.log(cont_f0_lpf)
                feats_lf0 = np.concatenate([uv, log_contf0_lpf, codeap, mcep], axis=1)
                logging.info(feats_lf0.shape)
                write_hdf5(hdf5name, "/feat_org_lf0", feats_lf0)
                n_frame += feats_lf0.shape[0]
                if max_frame < feats_lf0.shape[0]:
                    max_frame = feats_lf0.shape[0]
                # overwrite wav file with the high-pass filtered version
                if args.highpass_cutoff != 0:
                    sf.write(args.wavdir + "/" + os.path.basename(wav_name), x, fs, 'PCM_16')
                # write WORLD analysis-synthesis wav for quality inspection
                wavpath = args.wavanasyndir + "/" + os.path.basename(wav_name)
                logging.info(wavpath)
                sp_rec = ps.mc2sp(mcep, args.mcep_alpha, args.fftl)
                wav = np.clip(pw.synthesize(f0, sp_rec, ap, fs, frame_period=args.shiftms), -1, 1)
                sf.write(wavpath, wav, fs, 'PCM_16')
            else:
                # --init: only extract raw F0 (used for F0-range statistics)
                _, f0, _, _ = analyze(x, fs=fs, fperiod=args.shiftms, fftl=args.fftl)
                write_hdf5(hdf5name, "/f0", f0)
                n_frame += f0.shape[0]
                if max_frame < f0.shape[0]:
                    max_frame = f0.shape[0]
            count += 1
        arr[0] += n_wav
        arr[1] += n_sample
        arr[2] += n_frame
        if (n_wav > 0):
            logging.info(str(arr[0])+" "+str(n_wav)+" "+str(arr[1])+" "+str(n_sample/n_wav)+" "+str(arr[2])\
                        +" "+str(n_frame/n_wav)+" max_frame = "+str(max_frame))
    # divide the file list evenly across the worker processes
    file_lists = np.array_split(file_list, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]
    # run workers in parallel and wait for completion
    processes = []
    arr = mp.Array('d', 3)
    for f in file_lists:
        p = mp.Process(target=feature_extract, args=(f,arr))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    logging.info(str(arr[0])+" "+str(arr[1])+" "+str(arr[1]/arr[0])+" "+str(arr[2])+" "+str(arr[2]/arr[0]))
if __name__ == "__main__":
    main()
| true | true |
1c32ec50134f1af67e3fc4a70e2f7a93960bbedc | 1,162 | py | Python | probs/prob38.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | probs/prob38.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | probs/prob38.py | mattrid93/ProjectEuler | 3e1cf1bad9581e526b37d17e20b5fe8af837c1c6 | [
"MIT"
] | null | null | null | """Problem 38: Pandigital multiples"""
import unittest
def is_pandigital(n):
    """Return True if *n* uses each of the digits 1-9 exactly once."""
    digits = str(n)
    # Must be exactly nine digits long and contain no zero.
    if len(digits) != 9 or "0" in digits:
        return False
    # All nine digits distinct => each of 1-9 appears exactly once.
    return len(set(digits)) == len(digits)
def multiply_by_1_m(n):
    """Return the concatenated product of n with 1, 2, ... until the
    concatenation reaches at least nine digits."""
    pieces = []
    total_len = 0
    multiplier = 1
    while total_len < 9:
        piece = str(n * multiplier)
        pieces.append(piece)
        total_len += len(piece)
        multiplier += 1
    return int("".join(pieces))
def solution():
    """Return the largest 1-9 pandigital concatenated product of any n < 10000."""
    best = 0
    for candidate in range(10000):
        product = multiply_by_1_m(candidate)
        # Cheap numeric comparison first; pandigital check only when needed.
        if product > best and is_pandigital(product):
            best = product
    return best
class TestFunction(unittest.TestCase):
    """Unit tests for the pandigital helpers."""
    def test_tester(self):
        # Known pandigital and non-pandigital values.
        self.assertTrue(is_pandigital(192384576))
        self.assertTrue(is_pandigital(918273645))
        self.assertFalse(is_pandigital(192384575))
        self.assertFalse(is_pandigital(19236))
    def test_multiplier(self):
        # Worked examples from the problem statement.
        self.assertEqual(multiply_by_1_m(192), 192384576)
        self.assertEqual(multiply_by_1_m(9), 918273645)
if __name__ == "__main__":
    # Print the answer, then run the unit tests.
    print(solution())
    unittest.main()
| 27.023256 | 71 | 0.635972 | import unittest
def is_pandigital(n):
    """Return True if *n* uses each of the digits 1-9 exactly once."""
    if len(str(n)) != 9:
        return False
    if "0" in str(n):
        return False
    # Nine digits, no zero, all distinct => a 1-9 pandigital.
    return len(str(n)) == len(set(str(n)))
def multiply_by_1_m(n):
    """Return the concatenated product of n with 1, 2, ... until the
    concatenation reaches at least nine digits."""
    conc_product = ""
    i = 1
    while len(conc_product) < 9:
        conc_product += str(n*i)
        i += 1
    return int(conc_product)
def solution():
    """Return the largest 1-9 pandigital concatenated product of any n < 10000."""
    largest = 0
    for n in range(10000):
        prod = multiply_by_1_m(n)
        if is_pandigital(prod) and prod > largest:
            largest = prod
    return largest
class TestFunction(unittest.TestCase):
    """Unit tests for the pandigital helpers."""
    def test_tester(self):
        # Known pandigital and non-pandigital values.
        self.assertTrue(is_pandigital(192384576))
        self.assertTrue(is_pandigital(918273645))
        self.assertFalse(is_pandigital(192384575))
        self.assertFalse(is_pandigital(19236))
    def test_multiplier(self):
        # Worked examples from the problem statement.
        self.assertEqual(multiply_by_1_m(192), 192384576)
        self.assertEqual(multiply_by_1_m(9), 918273645)
if __name__ == "__main__":
    # Print the answer, then run the unit tests.
    print(solution())
    unittest.main()
| true | true |
1c32ece427ef557825a544911d926880964c2989 | 1,052 | py | Python | Desafios/Mundo 2/ex065.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | Desafios/Mundo 2/ex065.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | Desafios/Mundo 2/ex065.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | # 65
# Crie um programa que leia vários números inteiros pelo teclado. No final da execução, mostre a média entre todos os valores e qual foi o maior e o menor valores lidos. O programa deve perguntar ao usuário se ele quer ou não continuar a digitar valores.
# Description
# continuar ~> user response 'do you want to continue?' / validation
# cont ~> counter
# soma ~> sum
# maior ~> highest value / max
# menor ~> lower value / min
# media ~> average / avg
resp = 'S'
cont = 0
soma = 0
maior = 0
menor = 0
media = 0
# cont = soma = maior = menor = media = 0
while resp in 'Ss':
num = int(input('Digite um número: '))
soma += num
cont += 1
if cont == 1:
maior = num
menor = num
else:
if maior < num:
maior = num
if menor > num:
menor = num
resp = input('Quer continuar? [S/N] ').upper().split()[0]
media = soma / cont
print('Você digitou {} número e a média foi {}'.format(cont, media))
print('O maior valor foi {} e o menor foi {}'.format(maior, menor)) | 30.057143 | 254 | 0.615019 |
# Read integers repeatedly; report average, max and min when the user stops.
resp = 'S'
cont = 0
soma = 0
maior = 0
menor = 0
media = 0
while resp in 'Ss':
    num = int(input('Digite um número: '))
    soma += num
    cont += 1
    # The first value initializes both extremes; later values update them.
    if cont == 1:
        maior = num
        menor = num
    else:
        if maior < num:
            maior = num
        if menor > num:
            menor = num
    # NOTE(review): an empty answer makes .split()[0] raise IndexError -- confirm intended.
    resp = input('Quer continuar? [S/N] ').upper().split()[0]
media = soma / cont
print('Você digitou {} número e a média foi {}'.format(cont, media))
print('O maior valor foi {} e o menor foi {}'.format(maior, menor)) | true | true |
1c32ed6ae2c9c4365fc5b168442472f647ec0389 | 2,778 | py | Python | wkz/file_helper/gpx_parser.py | Anyesh/workoutizer | 5cfdef5d08f63d59d6d7d7922540566f6500753b | [
"MIT"
] | 1 | 2021-09-11T16:09:27.000Z | 2021-09-11T16:09:27.000Z | wkz/file_helper/gpx_parser.py | MakhmudovMurod/workoutizer | 1509b711b43875c5f01d62ddbc182b0ee5e23dd2 | [
"MIT"
] | null | null | null | wkz/file_helper/gpx_parser.py | MakhmudovMurod/workoutizer | 1509b711b43875c5f01d62ddbc182b0ee5e23dd2 | [
"MIT"
] | null | null | null | import logging
import gpxpy
import gpxpy.gpx
from wkz.file_helper.parser import Parser
from wkz.gis.geo import get_total_distance_of_trace
log = logging.getLogger(__name__)
class GPXParser(Parser):
    """Parse a GPX activity file into metadata and per-trackpoint records."""

    def __init__(self, path_to_file: str, md5sum: str):
        super(GPXParser, self).__init__(path_to_file, md5sum)
        # parsed gpxpy document, populated by _parse_metadata
        self.gpx = None

    def _parse_metadata(self):
        """Read and parse the GPX file, then extract sport, duration and date."""
        # Context manager guarantees the file handle is closed (the previous
        # version opened it and never closed it).
        with open(self.path_to_file, "r") as gpx_file:
            self.gpx = gpxpy.parse(gpx_file)
        self.file_name = self.get_file_name_from_path(self.path_to_file)
        self._get_sport_from_gpx_file()
        self._get_duration_from_gpx_file()

    def _get_sport_from_gpx_file(self):
        """Take the sport from the first track's type, falling back to an
        `activity` extension tag when the type is empty."""
        if self.gpx.tracks[0].type:
            self.sport = self.gpx.tracks[0].type
        else:
            for e in self.gpx.tracks[0].extensions:
                if e.tag.split("}")[1] == "activity":
                    self.sport = e.text
        log.debug(f"found sport: {self.sport}")

    def _get_duration_from_gpx_file(self):
        """Compute duration from the first/last point timestamps and choose a date."""
        all_points_time = [
            point.time
            for segment in self.gpx.tracks[0].segments
            for point in segment.points
        ]
        # Guard against tracks with no points at all — the previous version
        # raised IndexError on `all_points_time[0]` in that case.
        start = all_points_time[0] if all_points_time else None
        end = all_points_time[-1] if all_points_time else None
        if start and end:
            self.duration = end - start
            log.debug(f"found duration: {self.duration}")
        else:
            log.warning("could not find duration")
        if self.gpx.time:
            self.date = self.gpx.time
            log.debug(f"found date: {self.date}")
        else:
            self.date = start
            log.debug(f"found date: {self.date}")
        if not self.date:
            log.warning("could not find date in GPX file, will use OS file created date")
            self.get_file_created_datetime()

    def _parse_records(self):
        """Collect elevation, coordinates and timestamps from every trackpoint."""
        for track in self.gpx.tracks:
            for segment in track.segments:
                for point in segment.points:
                    if point.elevation:
                        self.altitude_list.append(point.elevation)
                    self.latitude_list.append(point.latitude)
                    self.longitude_list.append(point.longitude)
                    if point.time:
                        self.timestamps_list.append(point.time.timestamp())
        log.debug(f"found number of coordinates: {len(self.longitude_list)}")
        log.debug(f"found number of timestamps: {len(self.timestamps_list)}")
        log.debug(f"found number of elevation points: {len(self.altitude_list)}")

    def _post_process_data(self):
        """Compute the total distance of the collected coordinate trace."""
        self.distance = get_total_distance_of_trace(
            longitude_list=self.longitude_list,
            latitude_list=self.latitude_list,
        )
        log.debug(f"found distance: {self.distance}")
| 36.552632 | 89 | 0.605832 | import logging
import gpxpy
import gpxpy.gpx
from wkz.file_helper.parser import Parser
from wkz.gis.geo import get_total_distance_of_trace
log = logging.getLogger(__name__)
class GPXParser(Parser):
    """Parse a GPX activity file into metadata and per-trackpoint records."""

    def __init__(self, path_to_file: str, md5sum: str):
        super(GPXParser, self).__init__(path_to_file, md5sum)
        # parsed gpxpy document, filled in by _parse_metadata
        self.gpx = None

    def _parse_metadata(self):
        """Parse the GPX file, then derive sport, duration and date.

        NOTE(review): the file handle opened here is never closed — confirm
        whether relying on GC/interpreter cleanup is acceptable here.
        """
        gpx_file = open(self.path_to_file, "r")
        self.file_name = self.get_file_name_from_path(self.path_to_file)
        self.gpx = gpxpy.parse(gpx_file)
        self._get_sport_from_gpx_file()
        self._get_duration_from_gpx_file()

    def _get_sport_from_gpx_file(self):
        """Take the sport from the first track's type, falling back to an
        `activity` extension tag when the type is empty."""
        if self.gpx.tracks[0].type:
            self.sport = self.gpx.tracks[0].type
        else:
            for e in self.gpx.tracks[0].extensions:
                if e.tag.split("}")[1] == "activity":
                    self.sport = e.text
        log.debug(f"found sport: {self.sport}")

    def _get_duration_from_gpx_file(self):
        """Compute duration from first/last point timestamps and pick a date.

        NOTE(review): raises IndexError on tracks with no points
        (`all_points_time[0]`) — confirm inputs always contain points.
        """
        all_points_time = []
        for s in self.gpx.tracks[0].segments:
            for p in s.points:
                all_points_time.append(p.time)
        start = all_points_time[0]
        end = all_points_time[-1]
        if start and end:
            self.duration = end - start
            log.debug(f"found duration: {self.duration}")
        else:
            log.warning("could not find duration")
        # Prefer the file-level GPX timestamp; otherwise the first point's time.
        if self.gpx.time:
            self.date = self.gpx.time
            log.debug(f"found date: {self.date}")
        else:
            self.date = start
            log.debug(f"found date: {self.date}")
        if not self.date:
            log.warning("could not find date in GPX file, will use OS file created date")
            self.get_file_created_datetime()

    def _parse_records(self):
        """Collect elevation, coordinates and timestamps from every trackpoint."""
        for track in self.gpx.tracks:
            for segment in track.segments:
                for point in segment.points:
                    if point.elevation:
                        self.altitude_list.append(point.elevation)
                    self.latitude_list.append(point.latitude)
                    self.longitude_list.append(point.longitude)
                    if point.time:
                        self.timestamps_list.append(point.time.timestamp())
        log.debug(f"found number of coordinates: {len(self.longitude_list)}")
        log.debug(f"found number of timestamps: {len(self.timestamps_list)}")
        log.debug(f"found number of elevation points: {len(self.altitude_list)}")

    def _post_process_data(self):
        """Compute the total distance of the collected coordinate trace."""
        self.distance = get_total_distance_of_trace(
            longitude_list=self.longitude_list,
            latitude_list=self.latitude_list,
        )
        log.debug(f"found distance: {self.distance}")
| true | true |
1c32edfabd39c304d7d0088ece80a414f1b16038 | 3,450 | py | Python | kubernetes/client/models/v1_pod_readiness_gate.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_pod_readiness_gate.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_pod_readiness_gate.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PodReadinessGate(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'condition_type': 'str'
    }

    attribute_map = {
        'condition_type': 'conditionType'
    }

    def __init__(self, condition_type=None):
        """
        V1PodReadinessGate - a model defined in Swagger
        """
        self._condition_type = None
        self.discriminator = None

        self.condition_type = condition_type

    @property
    def condition_type(self):
        """
        Gets the condition_type of this V1PodReadinessGate.
        ConditionType refers to a condition in the pod's condition list with matching type.

        :return: The condition_type of this V1PodReadinessGate.
        :rtype: str
        """
        return self._condition_type

    @condition_type.setter
    def condition_type(self, condition_type):
        """
        Sets the condition_type of this V1PodReadinessGate.
        ConditionType refers to a condition in the pod's condition list with matching type.

        :param condition_type: The condition_type of this V1PodReadinessGate.
        :type: str
        """
        if condition_type is None:
            raise ValueError("Invalid value for `condition_type`, must not be `None`")

        self._condition_type = condition_type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Plain dict.items() instead of six.iteritems: drops the runtime
        # dependency on the py2-compat `six` package with identical behavior.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1PodReadinessGate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 26.953125 | 105 | 0.572754 |
from pprint import pformat
from six import iteritems
import re
class V1PodReadinessGate(object):
    """Swagger-generated model for a pod readiness gate (a single
    conditionType string). Do not edit manually."""
    # attribute name -> swagger type
    swagger_types = {
        'condition_type': 'str'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'condition_type': 'conditionType'
    }

    def __init__(self, condition_type=None):
        """Initialize the model; condition_type is required (setter rejects None)."""
        self._condition_type = None
        self.discriminator = None

        self.condition_type = condition_type

    @property
    def condition_type(self):
        """The type of the pod condition this gate refers to.

        :rtype: str
        """
        return self._condition_type

    @condition_type.setter
    def condition_type(self, condition_type):
        """Set the condition type; raises ValueError if None (required field)."""
        if condition_type is None:
            raise ValueError("Invalid value for `condition_type`, must not be `None`")

        self._condition_type = condition_type

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True if both objects are the same model type with equal state."""
        if not isinstance(other, V1PodReadinessGate):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True if the objects are not equal."""
        return not self == other
| true | true |
1c32ee85eb0940f440e81b3cb578789a02c6913d | 611 | py | Python | hydragnn/utils/__init__.py | allaffa/HydraGNN | b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6 | [
"BSD-3-Clause"
] | null | null | null | hydragnn/utils/__init__.py | allaffa/HydraGNN | b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6 | [
"BSD-3-Clause"
] | null | null | null | hydragnn/utils/__init__.py | allaffa/HydraGNN | b48f75cd3fe1b0d03bae9af3e6bdc2bb29f8b9c6 | [
"BSD-3-Clause"
] | null | null | null | from .print_utils import print_distributed, iterate_tqdm, setup_log
from .distributed import (
get_comm_size_and_rank,
get_device_list,
get_device,
get_device_name,
get_device_from_name,
is_model_distributed,
get_distributed_model,
setup_ddp,
)
from .model import (
save_model,
get_summary_writer,
load_existing_model,
load_existing_model_config,
loss_function_selection,
)
from .time_utils import Timer, print_timers
from .config_utils import (
update_config,
update_config_minmax,
get_log_name_config,
)
from .optimizer import select_optimizer
| 22.62963 | 67 | 0.767594 | from .print_utils import print_distributed, iterate_tqdm, setup_log
from .distributed import (
get_comm_size_and_rank,
get_device_list,
get_device,
get_device_name,
get_device_from_name,
is_model_distributed,
get_distributed_model,
setup_ddp,
)
from .model import (
save_model,
get_summary_writer,
load_existing_model,
load_existing_model_config,
loss_function_selection,
)
from .time_utils import Timer, print_timers
from .config_utils import (
update_config,
update_config_minmax,
get_log_name_config,
)
from .optimizer import select_optimizer
| true | true |
1c32eebf19a7d097e69ba31db590596e3b126e64 | 7,048 | py | Python | shop/models.py | jeffykle/kf-public | 2ea86908709f9903b626e7be192126053f39dfd2 | [
"MIT"
] | null | null | null | shop/models.py | jeffykle/kf-public | 2ea86908709f9903b626e7be192126053f39dfd2 | [
"MIT"
] | null | null | null | shop/models.py | jeffykle/kf-public | 2ea86908709f9903b626e7be192126053f39dfd2 | [
"MIT"
] | null | null | null | from decimal import Decimal
from address.models import AddressField
from django import forms
from django.contrib.auth.models import Group, Permission, User
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Q
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from gallery.models import GalleryImage, GalleryItem, InstallationPage
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.models import TaggedItemBase
from wagtail.admin.edit_handlers import (FieldPanel, FieldRowPanel,
InlinePanel, MultiFieldPanel,
StreamFieldPanel)
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from wagtail.snippets.models import register_snippet
class Shop(RoutablePageMixin, Page):
    """Shop index page listing every gallery item offered for sale."""

    def get_context(self, request):
        """Add a paginated queryset of sellable items to the template context."""
        context = super().get_context(request)
        # Items sold directly or via an external marketplace, newest first.
        shop_items = GalleryItem.objects.filter(
            Q(direct_sale=True) | Q(external_sale=True)
        ).live().order_by('-last_published_at')
        paginator = Paginator(shop_items, 12)
        # Paginator.get_page() already falls back to page 1 for non-integer
        # input and clamps out-of-range pages, so no try/except is needed
        # (the previous version also left a debug print loop in place).
        context['shop_items'] = paginator.get_page(request.GET.get('page'))
        return context

    # view method for subpaths of /shop/ (everything except cart/ and profile/)
    @route(r'^((?!cart/|profile/)[-\w]+)/$')
    def item_view(self, request, item_slug):
        """Render a single shop item page for the given slug."""
        # Let get_object_or_404 raise Http404 for unknown slugs; the previous
        # version swallowed the error into item=None and then crashed on
        # `item.title` below.
        item = get_object_or_404(GalleryItem, slug=item_slug)
        return render(request, "shop/shop_item.html", {
            'item': item,
            'shop_title': self.title,
            'shop_page_title': item.title,
        })
class ShopItem(GalleryItem, RoutablePageMixin):
    """Proxy over GalleryItem so shop items can carry routable-page behavior
    without a separate database table."""
    class Meta:
        proxy = True

    # parent_page_types = ['Shop']
class Cart(Page):
    """Shopping-cart page; the cart is a list of GalleryItem PKs stored in
    the visitor's session."""

    def get_context(self, request):
        """Resolve the session cart into item objects and a price total."""
        context = super().get_context(request)
        session = request.session
        # Make sure first-time visitors have an (empty) cart list.
        if 'cart' not in session:
            session['cart'] = []
        # Sum the direct-sale prices; the previous version also bound an
        # unused `user` local here.
        total = Decimal(0.00)
        for item_id in session['cart']:
            try:
                total += GalleryItem.objects.get(pk=item_id).direct_sale_price
            except GalleryItem.DoesNotExist:
                # Stale cart entry (item deleted/unpublished): skip it.
                pass
        context['cart_total'] = total
        context['cart'] = GalleryItem.objects.filter(pk__in=session['cart'])
        return context
class Profile(Page):
    """User profile page listing the visitor's active saved addresses."""

    def get_context(self, request):
        context = super().get_context(request)
        active_addresses = UserAddress.objects.filter(
            user=request.user,
            active=True,
        )
        context['addresses'] = active_addresses
        return context
class Order(models.Model):
    """A shop order.

    Username/email are denormalized onto the order at creation time so the
    record stays meaningful even if the user account is later deleted.
    """
    STATUS_OPTIONS = (
        ('processing', 'In Process, payment not submitted.'),
        ('success', 'Order Completed Succesfully'),
        ('failed', 'Order Failed'),
    )
    status = models.CharField(max_length=100, choices=STATUS_OPTIONS)
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, )
    items = models.ManyToManyField(GalleryItem,)
    date = models.DateTimeField()
    user_username = models.CharField(max_length=320, )
    user_email = models.CharField(max_length=320, )
    total = models.DecimalField(
        "Order Total, $", blank=True, null=True, max_digits=6, decimal_places=2,)
    shipping_address = models.ForeignKey(
        'UserAddress', null=True, on_delete=models.SET_NULL)
    first_name = models.CharField(max_length=60, null=True)
    last_name = models.CharField(max_length=60, null=True)
    paypal_id = models.CharField(max_length=100, null=True, blank=True)

    @property
    def main_image(self):
        """Main image of the first ordered item, or None if unavailable."""
        try:
            return self.items.first().specific.main_image
        except Exception:
            print('Main image not found.')
            return None

    def save(self, *args, **kwargs):
        """On first save, stamp the order date and denormalize user fields.

        (The previous docstring claimed only 'update timestamps'.)
        """
        if not self.id:
            self.date = timezone.now()
            self.user_username = self.user.username
            self.user_email = self.user.email
        return super(Order, self).save(*args, **kwargs)

    def _format_shipping_address(self, sep):
        """Build the shipping address using *sep* as the line separator.

        Shared by the plain-text and HTML address properties, which
        previously duplicated this logic line for line.
        """
        addr = self.shipping_address.address
        frmt = self.first_name + " " + self.last_name + sep
        frmt += addr.street_number + " " + addr.route + sep
        # Truthiness also guards against address2 being None (it is a
        # nullable field), which the old `len(...)` check crashed on.
        if self.shipping_address.address2:
            frmt += self.shipping_address.address2 + sep
        frmt += str(addr.locality).replace(", United States", "")
        return frmt

    @property
    def formatted_shipping_address(self):
        """Shipping address as plain text with newline separators."""
        return self._format_shipping_address("\n")

    @property
    def html_formatted_shipping_address(self):
        """Shipping address as HTML with <br> line separators."""
        return self._format_shipping_address("<br>")

    panels = [
        FieldRowPanel([
            FieldPanel('total'),
            FieldPanel('status'),
            FieldPanel('paypal_id')
        ]),
        FieldRowPanel([
            FieldPanel('first_name'),
            FieldPanel('last_name'),
        ]),
        FieldPanel('shipping_address'),
        FieldPanel('date'),
        FieldPanel('user_username'),
        FieldPanel('user_email'),
        FieldPanel('items'),
    ]
class UserAddress(models.Model):
    """A saved shipping address belonging to a user; soft-deleted via
    the `active` flag rather than removed."""
    user = models.ForeignKey(
        User, on_delete=models.SET_NULL, null=True, related_name="user_address")
    address = AddressField()
    address2 = models.CharField(max_length=60, null=True, blank=True)
    active = models.BooleanField(default=True)

    @property
    def formatted(self):
        """Raw address plus the optional second line when present."""
        # Truthiness covers both None and the empty string, replacing the
        # verbose `is not None and len(...) > 0` check.
        if self.address2:
            return self.address.raw + ', ' + self.address2
        return self.address.raw

    def __str__(self):
        return self.formatted
| 33.245283 | 81 | 0.642594 | from decimal import Decimal
from address.models import AddressField
from django import forms
from django.contrib.auth.models import Group, Permission, User
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Q
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from gallery.models import GalleryImage, GalleryItem, InstallationPage
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
from taggit.models import TaggedItemBase
from wagtail.admin.edit_handlers import (FieldPanel, FieldRowPanel,
InlinePanel, MultiFieldPanel,
StreamFieldPanel)
from wagtail.contrib.routable_page.models import RoutablePageMixin, route
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, PageManager
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from wagtail.snippets.models import register_snippet
class Shop(RoutablePageMixin, Page):
def get_context(self, request):
context = super().get_context(request)
shop_items = GalleryItem.objects.filter(Q(direct_sale=True) | Q(
external_sale=True)).live().order_by('-last_published_at')
paginator = Paginator(shop_items, 12)
page = request.GET.get('page')
try:
items = paginator.get_page(page)
except PageNotAnInteger:
items = paginator.get_page(1)
context['shop_items'] = items
for item in items:
print(item)
return context
@route(r'^((?!cart/|profile/)[-\w]+)/$')
def item_view(self, request, item_slug):
if item_slug == 'cart':
pass
try:
item = get_object_or_404(GalleryItem, slug=item_slug)
except Exception:
item = None
if item is None:
pass
return render(request, "shop/shop_item.html", {
'item': item,
'shop_title': self.title,
'shop_page_title': item.title,
})
class ShopItem(GalleryItem, RoutablePageMixin):
class Meta:
proxy = True
class Cart(Page):
def get_context(self, request):
context = super().get_context(request)
user = request.user
session = request.session
if 'cart' not in session:
session['cart'] = []
context['cart_total'] = Decimal(0.00)
for item_id in session['cart']:
try:
p = GalleryItem.objects.get(pk=item_id).direct_sale_price
context['cart_total'] += p
except GalleryItem.DoesNotExist:
pass
context['cart'] = GalleryItem.objects.filter(pk__in=session['cart'])
return context
class Profile(Page):
def get_context(self, request):
context = super().get_context(request)
context['addresses'] = UserAddress.objects.filter(
user=request.user, active=True)
return context
class Order(models.Model):
STATUS_OPTIONS = (
('processing', 'In Process, payment not submitted.'),
('success', 'Order Completed Succesfully'),
('failed', 'Order Failed'),
)
status = models.CharField(max_length=100, choices=STATUS_OPTIONS)
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, )
items = models.ManyToManyField(GalleryItem,)
date = models.DateTimeField()
user_username = models.CharField(max_length=320, )
user_email = models.CharField(max_length=320, )
total = models.DecimalField(
"Order Total, $", blank=True, null=True, max_digits=6, decimal_places=2,)
shipping_address = models.ForeignKey(
'UserAddress', null=True, on_delete=models.SET_NULL)
first_name = models.CharField(max_length=60, null=True)
last_name = models.CharField(max_length=60, null=True)
paypal_id = models.CharField(max_length=100, null=True, blank=True)
@property
def main_image(self):
try:
img = self.items.first().specific.main_image
except Exception:
print('Main image not found.')
img = None
return img
def save(self, *args, **kwargs):
if not self.id:
self.date = timezone.now()
self.user_username = self.user.username
self.user_email = self.user.email
return super(Order, self).save(*args, **kwargs)
@property
def formatted_shipping_address(self):
frmt = ""
addr = self.shipping_address.address
frmt += self.first_name + " " + self.last_name + "\n"
frmt += addr.street_number + " " + addr.route + "\n"
if len(self.shipping_address.address2):
frmt += self.shipping_address.address2 + "\n"
frmt += str(addr.locality).replace(", United States", "")
return frmt
@property
def html_formatted_shipping_address(self):
frmt = ""
addr = self.shipping_address.address
frmt += self.first_name + " " + self.last_name + "<br>"
frmt += addr.street_number + " " + addr.route + "<br>"
if len(self.shipping_address.address2):
frmt += self.shipping_address.address2 + "<br>"
frmt += str(addr.locality).replace(", United States", "")
return frmt
panels = [
FieldRowPanel([
FieldPanel('total'),
FieldPanel('status'),
FieldPanel('paypal_id')
]),
FieldRowPanel([
FieldPanel('first_name'),
FieldPanel('last_name'),
]),
FieldPanel('shipping_address'),
FieldPanel('date'),
FieldPanel('user_username'),
FieldPanel('user_email'),
FieldPanel('items'),
]
class UserAddress(models.Model):
user = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name="user_address")
address = AddressField()
address2 = models.CharField(max_length=60, null=True, blank=True)
active = models.BooleanField(default=True)
@property
def formatted(self):
if self.address2 is not None and len(self.address2) > 0:
return self.address.raw + ', ' + self.address2
return self.address.raw
def __str__(self):
return self.formatted
| true | true |
1c32eefdfc4cf975266f521a2f569177431466c6 | 6,832 | py | Python | sentry_sdk/integrations/aiohttp.py | Ultimaker/sentry-python | 11b4bae19906cceb2091cafeee458a7f2002e498 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/aiohttp.py | Ultimaker/sentry-python | 11b4bae19906cceb2091cafeee458a7f2002e498 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/aiohttp.py | Ultimaker/sentry-python | 11b4bae19906cceb2091cafeee458a7f2002e498 | [
"BSD-2-Clause"
] | 1 | 2021-02-17T01:38:03.000Z | 2021-02-17T01:38:03.000Z | import sys
import weakref
from sentry_sdk._compat import reraise
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations._wsgi_common import (
_filter_headers,
request_body_within_bounds,
)
from sentry_sdk.tracing import Span
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
AnnotatedValue,
)
import asyncio
from aiohttp.web import Application, HTTPException, UrlDispatcher
from sentry_sdk._types import MYPY
if MYPY:
from aiohttp.web_request import Request
from aiohttp.abc import AbstractMatchInfo
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Callable
from typing import Union
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
class AioHttpIntegration(Integration):
    """Sentry integration for aiohttp: monkey-patches the server's request
    handler (to capture errors/spans) and the URL dispatcher (to name
    transactions after the resolved handler)."""
    identifier = "aiohttp"

    @staticmethod
    def setup_once():
        # type: () -> None
        """Install the aiohttp patches. Must run once at SDK init."""
        if not HAS_REAL_CONTEXTVARS:
            # We better have contextvars or we're going to leak state between
            # requests.
            raise RuntimeError(
                "The aiohttp integration for Sentry requires Python 3.7+ "
                " or aiocontextvars package"
            )

        # aiohttp logs handled exceptions itself; avoid double reporting.
        ignore_logger("aiohttp.server")

        old_handle = Application._handle

        @asyncio.coroutine
        def sentry_app_handle(self, request, *args, **kwargs):
            # type: (Any, Request, *Any, **Any) -> Any
            @asyncio.coroutine
            def inner():
                # type: () -> Any
                hub = Hub.current
                if hub.get_integration(AioHttpIntegration) is None:
                    # Integration disabled on this hub: pass straight through.
                    old_handle_response = yield from old_handle(self, request, *args, **kwargs)
                    return old_handle_response

                # Weak ref so the event processor does not keep the request alive.
                weak_request = weakref.ref(request)

                with Hub(Hub.current) as hub:
                    with hub.configure_scope() as scope:
                        scope.clear_breadcrumbs()
                        scope.add_event_processor(_make_request_processor(weak_request))

                    span = Span.continue_from_headers(request.headers)
                    span.op = "http.server"
                    # If this transaction name makes it to the UI, AIOHTTP's
                    # URL resolver did not find a route or died trying.
                    span.transaction = "generic AIOHTTP request"

                    with hub.start_span(span):
                        try:
                            response = yield from old_handle(self, request)
                        except HTTPException as e:
                            span.set_http_status(e.status_code)
                            raise
                        except asyncio.CancelledError:
                            span.set_status("cancelled")
                            raise
                        except Exception:
                            # This will probably map to a 500 but seems like we
                            # have no way to tell. Do not set span status.
                            reraise(*_capture_exception(hub))

                        span.set_http_status(response.status)
                        return response

            # Explicitly wrap in task such that current contextvar context is
            # copied. Just doing `return await inner()` will leak scope data
            # between requests.
            event_loop_response = yield from asyncio.get_event_loop().create_task(inner())
            return event_loop_response

        Application._handle = sentry_app_handle

        old_urldispatcher_resolve = UrlDispatcher.resolve

        @asyncio.coroutine
        def sentry_urldispatcher_resolve(self, request):
            # type: (UrlDispatcher, Request) -> AbstractMatchInfo
            """Name the current transaction after the resolved route handler."""
            rv = yield from old_urldispatcher_resolve(self, request)

            name = None

            try:
                name = transaction_from_function(rv.handler)
            except Exception:
                # Handler may not be a plain function; keep the generic name.
                pass

            if name is not None:
                with Hub.current.configure_scope() as scope:
                    scope.transaction = name

            return rv

        UrlDispatcher.resolve = sentry_urldispatcher_resolve
def _make_request_processor(weak_request):
# type: (Callable[[], Request]) -> EventProcessor
def aiohttp_processor(
event, # type: Dict[str, Any]
hint, # type: Dict[str, Tuple[type, BaseException, Any]]
):
# type: (...) -> Dict[str, Any]
request = weak_request()
if request is None:
return event
with capture_internal_exceptions():
request_info = event.setdefault("request", {})
request_info["url"] = "%s://%s%s" % (
request.scheme,
request.host,
request.path,
)
request_info["query_string"] = request.query_string
request_info["method"] = request.method
request_info["env"] = {"REMOTE_ADDR": request.remote}
hub = Hub.current
request_info["headers"] = _filter_headers(dict(request.headers))
# Just attach raw data here if it is within bounds, if available.
# Unfortunately there's no way to get structured data from aiohttp
# without awaiting on some coroutine.
request_info["data"] = get_aiohttp_request_data(hub, request)
return event
return aiohttp_processor
def _capture_exception(hub):
    # type: (Hub) -> ExcInfo
    """Report the in-flight exception to Sentry and hand back its exc_info."""
    current_exc_info = sys.exc_info()
    sentry_event, event_hint = event_from_exception(
        current_exc_info,
        client_options=hub.client.options,  # type: ignore
        mechanism={"type": "aiohttp", "handled": False},
    )
    hub.capture_event(sentry_event, hint=event_hint)
    return current_exc_info
BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]"


def get_aiohttp_request_data(hub, request):
    # type: (Hub, Request) -> Union[Optional[str], AnnotatedValue]
    """Return the request body as text, a placeholder, or None.

    Only bodies that aiohttp has already read into memory can be shown;
    oversized bodies are replaced with an AnnotatedValue stub.
    """
    raw_body = request._read_bytes

    if raw_body is None:
        # Body was never read into memory; we can only report whether one
        # exists at all.
        return BODY_NOT_READ_MESSAGE if request.can_read_body else None

    body_len = len(raw_body)
    if not request_body_within_bounds(hub.client, body_len):
        # Too large per client config: attach metadata instead of the body.
        return AnnotatedValue(
            "",
            {"rem": [["!config", "x", 0, body_len]], "len": body_len},
        )

    return raw_body.decode(request.charset or "utf-8", "replace")
| 33.655172 | 96 | 0.598507 | import sys
import weakref
from sentry_sdk._compat import reraise
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations._wsgi_common import (
_filter_headers,
request_body_within_bounds,
)
from sentry_sdk.tracing import Span
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
AnnotatedValue,
)
import asyncio
from aiohttp.web import Application, HTTPException, UrlDispatcher
from sentry_sdk._types import MYPY
if MYPY:
from aiohttp.web_request import Request
from aiohttp.abc import AbstractMatchInfo
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Callable
from typing import Union
from sentry_sdk.utils import ExcInfo
from sentry_sdk._types import EventProcessor
class AioHttpIntegration(Integration):
    """Sentry integration for aiohttp: monkey-patches the server's request
    handler (error/span capture) and the URL dispatcher (transaction names)."""
    identifier = "aiohttp"

    @staticmethod
    def setup_once():
        """Install the aiohttp patches. Must run once at SDK init."""
        if not HAS_REAL_CONTEXTVARS:
            # We need contextvars, or scope state would leak between
            # requests.
            raise RuntimeError(
                "The aiohttp integration for Sentry requires Python 3.7+ "
                " or aiocontextvars package"
            )

        # aiohttp logs handled exceptions itself; avoid double reporting.
        ignore_logger("aiohttp.server")
        old_handle = Application._handle

        @asyncio.coroutine
        def sentry_app_handle(self, request, *args, **kwargs):
            # type: (Any, Request, *Any, **Any) -> Any
            @asyncio.coroutine
            def inner():
                # type: () -> Any
                hub = Hub.current
                if hub.get_integration(AioHttpIntegration) is None:
                    # Integration disabled on this hub: pass straight through.
                    old_handle_response = yield from old_handle(self, request, *args, **kwargs)
                    return old_handle_response

                # Weak ref so the event processor does not keep the request alive.
                weak_request = weakref.ref(request)

                with Hub(Hub.current) as hub:
                    with hub.configure_scope() as scope:
                        scope.clear_breadcrumbs()
                        scope.add_event_processor(_make_request_processor(weak_request))

                    span = Span.continue_from_headers(request.headers)
                    span.op = "http.server"
                    # If this transaction name makes it to the UI, AIOHTTP's
                    # URL resolver did not find a route or died trying.
                    span.transaction = "generic AIOHTTP request"

                    with hub.start_span(span):
                        try:
                            response = yield from old_handle(self, request)
                        except HTTPException as e:
                            span.set_http_status(e.status_code)
                            raise
                        except asyncio.CancelledError:
                            span.set_status("cancelled")
                            raise
                        except Exception:
                            # Probably maps to a 500, but we cannot tell,
                            # so the span status is left unset.
                            reraise(*_capture_exception(hub))

                        span.set_http_status(response.status)
                        return response

            # Wrap in a task so the current contextvar context is copied;
            # a bare `await inner()` would leak scope data between requests.
            event_loop_response = yield from asyncio.get_event_loop().create_task(inner())
            return event_loop_response

        Application._handle = sentry_app_handle
        old_urldispatcher_resolve = UrlDispatcher.resolve

        @asyncio.coroutine
        def sentry_urldispatcher_resolve(self, request):
            """Name the current transaction after the resolved route handler."""
            rv = yield from old_urldispatcher_resolve(self, request)
            name = None
            try:
                name = transaction_from_function(rv.handler)
            except Exception:
                # Handler may not be a plain function; keep the generic name.
                pass
            if name is not None:
                with Hub.current.configure_scope() as scope:
                    scope.transaction = name
            return rv

        UrlDispatcher.resolve = sentry_urldispatcher_resolve
def _make_request_processor(weak_request):
    """Build an event processor that attaches HTTP request data to events.

    Holds only a weak reference so the processor never keeps the aiohttp
    request alive past its natural lifetime.
    """
    def aiohttp_processor(
        event,
        hint,
    ):
        request = weak_request()
        if request is None:
            # Request already garbage-collected; nothing to attach.
            return event

        with capture_internal_exceptions():
            request_info = event.setdefault("request", {})

            request_info["url"] = "%s://%s%s" % (
                request.scheme,
                request.host,
                request.path,
            )

            request_info["query_string"] = request.query_string
            request_info["method"] = request.method
            request_info["env"] = {"REMOTE_ADDR": request.remote}

            hub = Hub.current
            request_info["headers"] = _filter_headers(dict(request.headers))

            # Raw body only: structured parsing would require awaiting a
            # coroutine, which an event processor cannot do.
            request_info["data"] = get_aiohttp_request_data(hub, request)

        return event

    return aiohttp_processor
def _capture_exception(hub):
    # type: (Hub) -> ExcInfo
    """Report the in-flight exception to Sentry and return its exc_info triple."""
    exc_info = sys.exc_info()
    event, hint = event_from_exception(
        exc_info,
        client_options=hub.client.options,  # type: ignore
        mechanism={"type": "aiohttp", "handled": False},
    )
    hub.capture_event(event, hint=hint)
    return exc_info
# Placeholder attached when a body exists but was never read into memory.
BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]"


def get_aiohttp_request_data(hub, request):
    """Return the request body as text, a placeholder, or None.

    Only bodies aiohttp already read into memory can be shown; oversized
    bodies are replaced with an AnnotatedValue stub per client config.
    """
    bytes_body = request._read_bytes

    if bytes_body is not None:
        # we have body to show
        if not request_body_within_bounds(hub.client, len(bytes_body)):
            return AnnotatedValue(
                "",
                {"rem": [["!config", "x", 0, len(bytes_body)]], "len": len(bytes_body)},
            )
        encoding = request.charset or "utf-8"
        return bytes_body.decode(encoding, "replace")

    if request.can_read_body:
        # body exists but we can't show it
        return BODY_NOT_READ_MESSAGE

    # request has no body
    return None
| true | true |
1c32f1541522154feb791dd95a0f2cb90fe19515 | 20,451 | py | Python | bin/fence_create.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 31 | 2018-01-05T22:49:33.000Z | 2022-02-02T10:30:23.000Z | bin/fence_create.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 737 | 2017-12-11T17:42:11.000Z | 2022-03-29T22:42:52.000Z | bin/fence_create.py | themarcelor/fence | 9417655f84752477399e71b58b92c4c333b9704c | [
"Apache-2.0"
] | 46 | 2018-02-23T09:04:23.000Z | 2022-02-09T18:29:51.000Z | #!/usr/bin/env python
import argparse
import os
import sys
import logging
from cdislogging import get_logger
from fence.jwt import keys
from fence.config import config
from fence.scripting.fence_create import (
JWTCreator,
create_client_action,
create_or_update_google_bucket,
create_google_logging_bucket,
create_sample_data,
delete_client_action,
delete_users,
google_init,
list_client_action,
link_external_bucket,
link_bucket_to_project,
modify_client_action,
notify_problem_users,
remove_expired_google_accounts_from_proxy_groups,
remove_expired_google_service_account_keys,
sync_users,
download_dbgap_files,
delete_expired_service_accounts,
verify_bucket_access_group,
verify_user_registration,
force_update_google_link,
migrate_database,
google_list_authz_groups,
update_user_visas,
)
from fence.settings import CONFIG_SEARCH_FOLDERS
from gen3authz.client.arborist.client import ArboristClient
def str2bool(v):
    """Convert a case-insensitive "true"/"false" CLI string to a bool.

    Raises ``argparse.ArgumentTypeError`` for any other value so argparse
    surfaces a clean usage error instead of a stack trace.
    """
    normalized = v.lower()
    if normalized == "true":
        return True
    if normalized == "false":
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def parse_arguments():
    """Build the fence-create CLI and parse ``sys.argv``.

    One subcommand per admin action (client management, user sync, Google
    bucket management, token creation, visa updates, ...). Returns the
    parsed ``argparse.Namespace``; ``args.action`` selects the branch in
    ``main()``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--path", default="/var/www/fence/", help="path to find configuration"
    )
    parser.add_argument(
        "--arborist",
        help="the base URL for the arborist service to sync to",
        default=None,
    )
    subparsers = parser.add_subparsers(title="action", dest="action")

    # --- sample data creation ---
    create = subparsers.add_parser("create")
    create.add_argument("yaml-file-path", help="Path to a YAML file")

    # --- OAuth2 client management ---
    client_create = subparsers.add_parser("client-create")
    client_create.add_argument("--client", required=True)
    client_create.add_argument("--urls", required=True, nargs="+")
    client_create.add_argument(
        "--username",
        help="user(can represent an organization) that owns the client",
        required=True,
    )
    client_create.add_argument(
        "--external",
        help="DEPRECATED. is this an external oidc client",
        action="store_true",
        default=False,
    )
    client_create.add_argument(
        "--auto-approve",
        help="whether oidc process skips user consent step",
        action="store_true",
        default=False,
    )
    client_create.add_argument(
        "--grant-types",
        help="which OAuth2 grant types are enabled for this client",
        nargs="+",
    )
    client_create.add_argument(
        "--public",
        help="whether OAuth2 client should be public (no client secret)",
        action="store_true",
        default=False,
    )
    client_create.add_argument(
        "--policies", help="which ABAC policies are granted to this client", nargs="*"
    )
    client_create.add_argument(
        "--allowed-scopes", help="which scopes are allowed for this client", nargs="+"
    )

    client_modify = subparsers.add_parser("client-modify")
    client_modify.add_argument("--client", required=True)
    client_modify.add_argument("--urls", required=False, nargs="+")
    client_modify.add_argument("--name", required=False)
    client_modify.add_argument("--description", required=False)
    client_modify.add_argument("--allowed-scopes", required=False, nargs="+")
    client_modify.add_argument(
        "--append",
        help="append either new allowed scopes or urls instead of replacing",
        action="store_true",
        default=False,
    )
    client_modify.add_argument(
        "--set-auto-approve",
        help="set the oidc process to skip user consent step",
        action="store_true",
        default=False,
    )
    client_modify.add_argument(
        "--unset-auto-approve",
        help="set the oidc process to not skip user consent step",
        action="store_true",
        default=False,
    )
    client_modify.add_argument(
        "--delete-urls", help="delete all urls", action="store_true", default=False
    )
    client_modify.add_argument(
        "--policies",
        help="which ABAC policies are granted to this client; if given, "
        "previous policies will be revoked",
        nargs="*",
    )

    client_list = subparsers.add_parser("client-list")

    client_delete = subparsers.add_parser("client-delete")
    client_delete.add_argument("--client", required=True)

    # --- user management ---
    user_delete = subparsers.add_parser("user-delete")
    user_delete.add_argument("--users", required=True, nargs="+")

    subparsers.add_parser("expired-service-account-delete")
    subparsers.add_parser("bucket-access-group-verify")

    hmac_create = subparsers.add_parser("hmac-create")
    hmac_create.add_argument("yaml-input")

    # --- dbGaP user sync ---
    dbgap_sync = subparsers.add_parser("sync")
    dbgap_sync.add_argument(
        "--projects", dest="project_mapping", help="Specify project mapping yaml file"
    )
    dbgap_sync.add_argument("--yaml", help="Sync from yaml file")
    dbgap_sync.add_argument("--csv_dir", help="specify csv file directory")
    dbgap_sync.add_argument(
        "--sync_from_dbgap", help="sync from dbgap server True/False", default="False"
    )
    dbgap_sync.add_argument(
        "--arborist",
        help="the base URL for the arborist service to sync to",
        default=None,
    )
    dbgap_sync.add_argument(
        "--folder",
        required=False,
        help="destination where dbGaP whitelist files are saved",
        default=None,
    )

    dbgap_download = subparsers.add_parser("dbgap-download-access-files")
    dbgap_download.add_argument(
        "--folder",
        required=False,
        help="destination where dbGaP whitelist files are saved",
        default=None,
    )

    # --- bucket management ---
    bucket_link_to_project = subparsers.add_parser("link-bucket-to-project")
    bucket_link_to_project.add_argument(
        "--bucket_id", required=True, help="ID or name for the bucket"
    )
    bucket_link_to_project.add_argument(
        "--bucket_provider", required=True, help="CloudProvider.name for the bucket"
    )
    bucket_link_to_project.add_argument(
        "--project_auth_id", required=True, help="Project.auth_id to link to bucket"
    )

    google_bucket_create = subparsers.add_parser("google-bucket-create")
    google_bucket_create.add_argument(
        "--unique-name",
        required=True,
        help="Name for the bucket, must be globally unique throughout Google",
    )
    google_bucket_create.add_argument(
        "--storage-class",
        default=None,
        help='Currently must be one of the following: "MULTI_REGIONAL", '
        '"REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"',
    )
    google_bucket_create.add_argument(
        "--public",
        default=None,
        help="whether or not the bucket should be open to the public."
        "WARNING: not providing this field will leave the bucket IAM policy"
        "untouched. to set or reset the policy use: "
        "--public True or --public False",
    )
    google_bucket_create.add_argument(
        "--requester-pays",
        action="store_true",
        default=False,
        help="Whether or not to enable requester_pays on the bucket",
    )
    google_bucket_create.add_argument(
        "--google-project-id",
        default=None,
        help="Google project this bucket should be associated with",
    )
    google_bucket_create.add_argument(
        "--project-auth-id",
        default=None,
        help="a Project.auth_id to associate this bucket with. "
        "The project must exist in the db already.",
    )
    google_bucket_create.add_argument(
        "--access-logs-bucket",
        default=None,
        help="Enables logging. Must provide a Google bucket name "
        "which will store the access logs",
    )
    google_bucket_create.add_argument(
        "--allowed-privileges",
        default=None,
        nargs="*",
        help="A list of allowed privileges ex: --allowed-privileges admin "
        "read write. Currently create a Google Bucket Access Group per "
        "privilege.",
    )

    external_bucket_create = subparsers.add_parser("link-external-bucket")
    external_bucket_create.add_argument(
        "--bucket-name",
        required=True,
        help="Name for the bucket, must be globally unique throughout Google",
    )

    google_logging_bucket_create = subparsers.add_parser("google-logging-bucket-create")
    google_logging_bucket_create.add_argument(
        "--unique-name",
        required=True,
        help="Name for the bucket, must be globally unique throughout Google",
    )
    google_logging_bucket_create.add_argument(
        "--storage-class",
        default=None,
        help='Currently must be one of the following: "MULTI_REGIONAL", '
        '"REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"',
    )
    google_logging_bucket_create.add_argument(
        "--google-project-id",
        default=None,
        help="Google project this bucket should be associated with. "
        "If not given, will attempt to determine from provided credentials.",
    )

    # --- Google maintenance actions (no extra options) ---
    manage_google_keys = subparsers.add_parser("google-manage-keys")
    init_google = subparsers.add_parser("google-init")
    manage_user_registrations = subparsers.add_parser(
        "google-manage-user-registrations"
    )
    manage_google_accounts = subparsers.add_parser("google-manage-account-access")

    # --- JWT token creation ---
    token_create = subparsers.add_parser("token-create")
    token_create.add_argument("--kid", help="key ID to use for signing tokens")
    token_create.add_argument(
        "--keys-dir",
        help=(
            "directory the RSA keys live in; defaults to `keys/` in the root"
            " directory for fence"
        ),
    )
    token_create.add_argument(
        "--type", required=True, help='type of token to create ("access" or "refresh")'
    )
    token_create.add_argument(
        "--username", required=True, help="username to generate the token for"
    )
    token_create.add_argument(
        "--scopes",
        required=True,
        help='scopes to include in the token (e.g. "user" or "data")',
    )
    token_create.add_argument("--exp", help="time in seconds until token expiration")

    # --- Google account linking ---
    force_link_google = subparsers.add_parser("force-link-google")
    force_link_google.add_argument(
        "--username", required=True, help="User to link with"
    )
    force_link_google.add_argument(
        "--google-email", required=True, help="Email to link to"
    )
    force_link_google.add_argument(
        "--expires_in",
        required=False,
        help="The time (in seconds) during which the Google account has bucket access (7 days max/default)",
    )

    notify_problem_users = subparsers.add_parser("notify-problem-users")
    notify_problem_users.add_argument(
        "--emails", required=True, nargs="+", help="List of emails to check/notify"
    )
    notify_problem_users.add_argument(
        "--auth_ids",
        required=True,
        nargs="+",
        help="List of project auth_ids to check access to",
    )
    notify_problem_users.add_argument(
        "--check_linking",
        required=False,
        default=False,
        help="True if you want to check that each email has a linked google account",
    )
    notify_problem_users.add_argument(
        "--google_project_id",
        required=True,
        help="Google Project id that all users belong to",
    )

    subparsers.add_parser("migrate", help="Migrate the fence database")
    subparsers.add_parser(
        "google-list-authz-groups",
        help="List the Google Buckets "
        "Fence is providing access to. Includes Fence Project.auth_id and Google Bucket "
        "Access Group",
    )

    # --- visa updates ---
    update_visas = subparsers.add_parser(
        "update-visas",
        help="Update visas and refresh tokens for users with valid visas and refresh tokens.",
    )
    update_visas.add_argument(
        "--chunk-size",
        required=False,
        help="size of chunk of users we want to take from each query to db. Default value: 10",
    )
    update_visas.add_argument(
        "--concurrency",
        required=False,
        help="number of concurrent users going through the visa update flow. Default value: 5",
    )
    update_visas.add_argument(
        "--thread-pool-size",
        required=False,
        help="number of Docker container CPU used for jwt verifcation. Default value: 3",
    )
    update_visas.add_argument(
        "--buffer-size", required=False, help="max size of queue. Default value: 10"
    )

    return parser.parse_args()
def main():
    """Entry point for fence-create: load config, then dispatch ``args.action``.

    Configuration precedence for DB/BASE_URL/etc. is: environment variable,
    then loaded fence config, then (for DB only) legacy ``fence.settings``.
    Most branches are thin wrappers around helpers in
    ``fence.scripting.fence_create``.
    """
    args = parse_arguments()

    # get database information
    sys.path.append(args.path)

    # replicate cfg loading done in flask app to maintain backwards compatibility
    # TODO (DEPRECATE LOCAL_SETTINGS): REMOVE this when putting cfg in
    # settings/local_settings is deprecated
    import flask

    settings_cfg = flask.Config(".")
    settings_cfg.from_object("fence.settings")
    config.update(dict(settings_cfg))
    # END - TODO (DEPRECATE LOCAL_SETTINGS): REMOVE

    config.load(search_folders=CONFIG_SEARCH_FOLDERS)

    DB = os.environ.get("FENCE_DB") or config.get("DB")

    # attempt to get from settings, this is backwards-compatibility for
    # integration tests
    if DB is None:
        try:
            from fence.settings import DB
        except ImportError:
            pass

    BASE_URL = os.environ.get("BASE_URL") or config.get("BASE_URL")
    ROOT_DIR = os.environ.get("ROOT_DIR") or os.path.dirname(
        os.path.dirname(os.path.realpath(__file__))
    )
    dbGaP = os.environ.get("dbGaP") or config.get("dbGaP")
    if not isinstance(dbGaP, list):
        dbGaP = [dbGaP]
    STORAGE_CREDENTIALS = os.environ.get("STORAGE_CREDENTIALS") or config.get(
        "STORAGE_CREDENTIALS"
    )
    usersync = config.get("USERSYNC", {})
    sync_from_visas = usersync.get("sync_from_visas", False)
    fallback_to_dbgap_sftp = usersync.get("fallback_to_dbgap_sftp", False)

    arborist = None
    if args.arborist:
        arborist = ArboristClient(
            arborist_base_url=args.arborist,
            logger=get_logger("user_syncer.arborist_client"),
            authz_provider="user-sync",
        )

    if args.action == "create":
        # positional "yaml-file-path" is only reachable via __dict__ because
        # its name is not a valid attribute identifier
        yaml_input = args.__dict__["yaml-file-path"]
        create_sample_data(DB, yaml_input)
    elif args.action == "client-create":
        confidential = not args.public
        create_client_action(
            DB,
            username=args.username,
            client=args.client,
            urls=args.urls,
            auto_approve=args.auto_approve,
            grant_types=args.grant_types,
            confidential=confidential,
            arborist=arborist,
            policies=args.policies,
            allowed_scopes=args.allowed_scopes,
        )
    elif args.action == "client-modify":
        modify_client_action(
            DB,
            client=args.client,
            delete_urls=args.delete_urls,
            urls=args.urls,
            name=args.name,
            description=args.description,
            set_auto_approve=args.set_auto_approve,
            unset_auto_approve=args.unset_auto_approve,
            arborist=arborist,
            policies=args.policies,
            allowed_scopes=args.allowed_scopes,
            append=args.append,
        )
    elif args.action == "client-delete":
        delete_client_action(DB, args.client)
    elif args.action == "client-list":
        list_client_action(DB)
    elif args.action == "user-delete":
        delete_users(DB, args.users)
    elif args.action == "expired-service-account-delete":
        delete_expired_service_accounts(DB)
    elif args.action == "bucket-access-group-verify":
        verify_bucket_access_group(DB)
    elif args.action == "sync":
        sync_users(
            dbGaP,
            STORAGE_CREDENTIALS,
            DB,
            projects=args.project_mapping,
            is_sync_from_dbgap_server=str2bool(args.sync_from_dbgap),
            sync_from_local_csv_dir=args.csv_dir,
            sync_from_local_yaml_file=args.yaml,
            folder=args.folder,
            arborist=arborist,
            sync_from_visas=sync_from_visas,
            fallback_to_dbgap_sftp=fallback_to_dbgap_sftp,
        )
    elif args.action == "dbgap-download-access-files":
        download_dbgap_files(
            dbGaP,
            STORAGE_CREDENTIALS,
            DB,
            folder=args.folder,
        )
    elif args.action == "google-manage-keys":
        remove_expired_google_service_account_keys(DB)
    elif args.action == "google-init":
        google_init(DB)
    elif args.action == "google-manage-user-registrations":
        verify_user_registration(DB)
    elif args.action == "google-manage-account-access":
        remove_expired_google_accounts_from_proxy_groups(DB)
    elif args.action == "google-bucket-create":
        # true if true provided, false if anything else provided, leave as
        # None if not provided at all (policy will remain unchanged)
        if args.public and args.public.lower().strip() == "true":
            args.public = True
        elif args.public is not None:
            args.public = False
        create_or_update_google_bucket(
            DB,
            args.unique_name,
            storage_class=args.storage_class,
            public=args.public,
            requester_pays=args.requester_pays,
            google_project_id=args.google_project_id,
            project_auth_id=args.project_auth_id,
            access_logs_bucket=args.access_logs_bucket,
            allowed_privileges=args.allowed_privileges,
        )
    elif args.action == "google-logging-bucket-create":
        create_google_logging_bucket(
            args.unique_name,
            storage_class=args.storage_class,
            google_project_id=args.google_project_id,
        )
    elif args.action == "link-external-bucket":
        link_external_bucket(DB, name=args.bucket_name)
    elif args.action == "link-bucket-to-project":
        link_bucket_to_project(
            DB,
            bucket_id=args.bucket_id,
            bucket_provider=args.bucket_provider,
            project_auth_id=args.project_auth_id,
        )
    elif args.action == "google-list-authz-groups":
        google_list_authz_groups(DB)
    elif args.action == "token-create":
        # FIX: argparse stores "--keys-dir" under attribute ``keys_dir``, so
        # the previous getattr(args, "keys-dir", <default>) always returned
        # the default and silently ignored a user-supplied --keys-dir.
        keys_path = args.keys_dir or os.path.join(ROOT_DIR, "keys")
        keypairs = keys.load_keypairs(keys_path)
        # Default to the most recent one, but try to find the keypair with
        # matching ``kid`` to the argument provided.
        keypair = keypairs[-1]
        kid = args.kid
        if kid:
            for try_keypair in keypairs:
                if try_keypair.kid == kid:
                    keypair = try_keypair
                    break
        jwt_creator = JWTCreator(
            DB,
            BASE_URL,
            kid=keypair.kid,
            private_key=keypair.private_key,
            username=args.username,
            scopes=args.scopes,
            expires_in=args.exp,
        )
        token_type = str(args.type).strip().lower()
        if token_type == "access_token" or token_type == "access":
            print(jwt_creator.create_access_token().token)
        elif token_type == "refresh_token" or token_type == "refresh":
            print(jwt_creator.create_refresh_token().token)
        else:
            print(
                'invalid token type "{}"; expected "access" or "refresh"'.format(
                    token_type
                )
            )
            sys.exit(1)
    elif args.action == "force-link-google":
        exp = force_update_google_link(
            DB,
            username=args.username,
            google_email=args.google_email,
            expires_in=args.expires_in,
        )
        print(exp)
    elif args.action == "notify-problem-users":
        notify_problem_users(
            DB, args.emails, args.auth_ids, args.check_linking, args.google_project_id
        )
    elif args.action == "migrate":
        migrate_database(DB)
    elif args.action == "update-visas":
        update_user_visas(
            DB,
            chunk_size=args.chunk_size,
            concurrency=args.concurrency,
            thread_pool_size=args.thread_pool_size,
            buffer_size=args.buffer_size,
        )


if __name__ == "__main__":
    main()
| 34.899317 | 108 | 0.643489 |
import argparse
import os
import sys
import logging
from cdislogging import get_logger
from fence.jwt import keys
from fence.config import config
from fence.scripting.fence_create import (
JWTCreator,
create_client_action,
create_or_update_google_bucket,
create_google_logging_bucket,
create_sample_data,
delete_client_action,
delete_users,
google_init,
list_client_action,
link_external_bucket,
link_bucket_to_project,
modify_client_action,
notify_problem_users,
remove_expired_google_accounts_from_proxy_groups,
remove_expired_google_service_account_keys,
sync_users,
download_dbgap_files,
delete_expired_service_accounts,
verify_bucket_access_group,
verify_user_registration,
force_update_google_link,
migrate_database,
google_list_authz_groups,
update_user_visas,
)
from fence.settings import CONFIG_SEARCH_FOLDERS
from gen3authz.client.arborist.client import ArboristClient
def str2bool(v):
if v.lower() == "true":
return True
elif v.lower() == "false":
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--path", default="/var/www/fence/", help="path to find configuration"
)
parser.add_argument(
"--arborist",
help="the base URL for the arborist service to sync to",
default=None,
)
subparsers = parser.add_subparsers(title="action", dest="action")
create = subparsers.add_parser("create")
create.add_argument("yaml-file-path", help="Path to a YAML file")
client_create = subparsers.add_parser("client-create")
client_create.add_argument("--client", required=True)
client_create.add_argument("--urls", required=True, nargs="+")
client_create.add_argument(
"--username",
help="user(can represent an organization) that owns the client",
required=True,
)
client_create.add_argument(
"--external",
help="DEPRECATED. is this an external oidc client",
action="store_true",
default=False,
)
client_create.add_argument(
"--auto-approve",
help="whether oidc process skips user consent step",
action="store_true",
default=False,
)
client_create.add_argument(
"--grant-types",
help="which OAuth2 grant types are enabled for this client",
nargs="+",
)
client_create.add_argument(
"--public",
help="whether OAuth2 client should be public (no client secret)",
action="store_true",
default=False,
)
client_create.add_argument(
"--policies", help="which ABAC policies are granted to this client", nargs="*"
)
client_create.add_argument(
"--allowed-scopes", help="which scopes are allowed for this client", nargs="+"
)
client_modify = subparsers.add_parser("client-modify")
client_modify.add_argument("--client", required=True)
client_modify.add_argument("--urls", required=False, nargs="+")
client_modify.add_argument("--name", required=False)
client_modify.add_argument("--description", required=False)
client_modify.add_argument("--allowed-scopes", required=False, nargs="+")
client_modify.add_argument(
"--append",
help="append either new allowed scopes or urls instead of replacing",
action="store_true",
default=False,
)
client_modify.add_argument(
"--set-auto-approve",
help="set the oidc process to skip user consent step",
action="store_true",
default=False,
)
client_modify.add_argument(
"--unset-auto-approve",
help="set the oidc process to not skip user consent step",
action="store_true",
default=False,
)
client_modify.add_argument(
"--delete-urls", help="delete all urls", action="store_true", default=False
)
client_modify.add_argument(
"--policies",
help="which ABAC policies are granted to this client; if given, "
"previous policies will be revoked",
nargs="*",
)
client_list = subparsers.add_parser("client-list")
client_delete = subparsers.add_parser("client-delete")
client_delete.add_argument("--client", required=True)
user_delete = subparsers.add_parser("user-delete")
user_delete.add_argument("--users", required=True, nargs="+")
subparsers.add_parser("expired-service-account-delete")
subparsers.add_parser("bucket-access-group-verify")
hmac_create = subparsers.add_parser("hmac-create")
hmac_create.add_argument("yaml-input")
dbgap_sync = subparsers.add_parser("sync")
dbgap_sync.add_argument(
"--projects", dest="project_mapping", help="Specify project mapping yaml file"
)
dbgap_sync.add_argument("--yaml", help="Sync from yaml file")
dbgap_sync.add_argument("--csv_dir", help="specify csv file directory")
dbgap_sync.add_argument(
"--sync_from_dbgap", help="sync from dbgap server True/False", default="False"
)
dbgap_sync.add_argument(
"--arborist",
help="the base URL for the arborist service to sync to",
default=None,
)
dbgap_sync.add_argument(
"--folder",
required=False,
help="destination where dbGaP whitelist files are saved",
default=None,
)
dbgap_download = subparsers.add_parser("dbgap-download-access-files")
dbgap_download.add_argument(
"--folder",
required=False,
help="destination where dbGaP whitelist files are saved",
default=None,
)
bucket_link_to_project = subparsers.add_parser("link-bucket-to-project")
bucket_link_to_project.add_argument(
"--bucket_id", required=True, help="ID or name for the bucket"
)
bucket_link_to_project.add_argument(
"--bucket_provider", required=True, help="CloudProvider.name for the bucket"
)
bucket_link_to_project.add_argument(
"--project_auth_id", required=True, help="Project.auth_id to link to bucket"
)
google_bucket_create = subparsers.add_parser("google-bucket-create")
google_bucket_create.add_argument(
"--unique-name",
required=True,
help="Name for the bucket, must be globally unique throughout Google",
)
google_bucket_create.add_argument(
"--storage-class",
default=None,
help='Currently must be one of the following: "MULTI_REGIONAL", '
'"REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"',
)
google_bucket_create.add_argument(
"--public",
default=None,
help="whether or not the bucket should be open to the public."
"WARNING: not providing this field will leave the bucket IAM policy"
"untouched. to set or reset the policy use: "
"--public True or --public False",
)
google_bucket_create.add_argument(
"--requester-pays",
action="store_true",
default=False,
help="Whether or not to enable requester_pays on the bucket",
)
google_bucket_create.add_argument(
"--google-project-id",
default=None,
help="Google project this bucket should be associated with",
)
google_bucket_create.add_argument(
"--project-auth-id",
default=None,
help="a Project.auth_id to associate this bucket with. "
"The project must exist in the db already.",
)
google_bucket_create.add_argument(
"--access-logs-bucket",
default=None,
help="Enables logging. Must provide a Google bucket name "
"which will store the access logs",
)
google_bucket_create.add_argument(
"--allowed-privileges",
default=None,
nargs="*",
help="A list of allowed privileges ex: --allowed-privileges admin "
"read write. Currently create a Google Bucket Access Group per "
"privilege.",
)
external_bucket_create = subparsers.add_parser("link-external-bucket")
external_bucket_create.add_argument(
"--bucket-name",
required=True,
help="Name for the bucket, must be globally unique throughout Google",
)
google_logging_bucket_create = subparsers.add_parser("google-logging-bucket-create")
google_logging_bucket_create.add_argument(
"--unique-name",
required=True,
help="Name for the bucket, must be globally unique throughout Google",
)
google_logging_bucket_create.add_argument(
"--storage-class",
default=None,
help='Currently must be one of the following: "MULTI_REGIONAL", '
'"REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"',
)
google_logging_bucket_create.add_argument(
"--google-project-id",
default=None,
help="Google project this bucket should be associated with. "
"If not given, will attempt to determine from provided credentials.",
)
manage_google_keys = subparsers.add_parser("google-manage-keys")
init_google = subparsers.add_parser("google-init")
manage_user_registrations = subparsers.add_parser(
"google-manage-user-registrations"
)
manage_google_accounts = subparsers.add_parser("google-manage-account-access")
token_create = subparsers.add_parser("token-create")
token_create.add_argument("--kid", help="key ID to use for signing tokens")
token_create.add_argument(
"--keys-dir",
help=(
"directory the RSA keys live in; defaults to `keys/` in the root"
" directory for fence"
),
)
token_create.add_argument(
"--type", required=True, help='type of token to create ("access" or "refresh")'
)
token_create.add_argument(
"--username", required=True, help="username to generate the token for"
)
token_create.add_argument(
"--scopes",
required=True,
help='scopes to include in the token (e.g. "user" or "data")',
)
token_create.add_argument("--exp", help="time in seconds until token expiration")
force_link_google = subparsers.add_parser("force-link-google")
force_link_google.add_argument(
"--username", required=True, help="User to link with"
)
force_link_google.add_argument(
"--google-email", required=True, help="Email to link to"
)
force_link_google.add_argument(
"--expires_in",
required=False,
help="The time (in seconds) during which the Google account has bucket access (7 days max/default)",
)
notify_problem_users = subparsers.add_parser("notify-problem-users")
notify_problem_users.add_argument(
"--emails", required=True, nargs="+", help="List of emails to check/notify"
)
notify_problem_users.add_argument(
"--auth_ids",
required=True,
nargs="+",
help="List of project auth_ids to check access to",
)
notify_problem_users.add_argument(
"--check_linking",
required=False,
default=False,
help="True if you want to check that each email has a linked google account",
)
notify_problem_users.add_argument(
"--google_project_id",
required=True,
help="Google Project id that all users belong to",
)
subparsers.add_parser("migrate", help="Migrate the fence database")
subparsers.add_parser(
"google-list-authz-groups",
help="List the Google Buckets "
"Fence is providing access to. Includes Fence Project.auth_id and Google Bucket "
"Access Group",
)
update_visas = subparsers.add_parser(
"update-visas",
help="Update visas and refresh tokens for users with valid visas and refresh tokens.",
)
update_visas.add_argument(
"--chunk-size",
required=False,
help="size of chunk of users we want to take from each query to db. Default value: 10",
)
update_visas.add_argument(
"--concurrency",
required=False,
help="number of concurrent users going through the visa update flow. Default value: 5",
)
update_visas.add_argument(
"--thread-pool-size",
required=False,
help="number of Docker container CPU used for jwt verifcation. Default value: 3",
)
update_visas.add_argument(
"--buffer-size", required=False, help="max size of queue. Default value: 10"
)
return parser.parse_args()
def main():
    """Entry point of the fence management CLI.

    Parses the command line, loads configuration (environment variables take
    precedence over config files / ``fence.settings``), then dispatches to the
    handler that implements the selected sub-command.

    NOTE(review): the handler functions (``sync_users``, ``create_client_action``,
    ``JWTCreator``, the ``config`` object, ...) are imported elsewhere in this
    module; their exact contracts are not visible from this block.
    """
    args = parse_arguments()
    # Make the fence installation importable before loading its settings module.
    sys.path.append(args.path)
    import flask
    settings_cfg = flask.Config(".")
    settings_cfg.from_object("fence.settings")
    config.update(dict(settings_cfg))
    config.load(search_folders=CONFIG_SEARCH_FOLDERS)
    # Resolution order for the DB URL: env var, config file, legacy settings module.
    DB = os.environ.get("FENCE_DB") or config.get("DB")
    if DB is None:
        try:
            from fence.settings import DB
        except ImportError:
            pass
    BASE_URL = os.environ.get("BASE_URL") or config.get("BASE_URL")
    ROOT_DIR = os.environ.get("ROOT_DIR") or os.path.dirname(
        os.path.dirname(os.path.realpath(__file__))
    )
    # dbGaP config may be a single mapping or a list of them; normalize to a list.
    dbGaP = os.environ.get("dbGaP") or config.get("dbGaP")
    if not isinstance(dbGaP, list):
        dbGaP = [dbGaP]
    STORAGE_CREDENTIALS = os.environ.get("STORAGE_CREDENTIALS") or config.get(
        "STORAGE_CREDENTIALS"
    )
    usersync = config.get("USERSYNC", {})
    sync_from_visas = usersync.get("sync_from_visas", False)
    fallback_to_dbgap_sftp = usersync.get("fallback_to_dbgap_sftp", False)
    # Arborist (authz service) client is optional; several sub-commands use it.
    arborist = None
    if args.arborist:
        arborist = ArboristClient(
            arborist_base_url=args.arborist,
            logger=get_logger("user_syncer.arborist_client"),
            authz_provider="user-sync",
        )
    # ---- sub-command dispatch -------------------------------------------
    if args.action == "create":
        # "yaml-file-path" is a positional arg whose dest keeps the hyphens,
        # so it is not a valid attribute name -> access via __dict__.
        yaml_input = args.__dict__["yaml-file-path"]
        create_sample_data(DB, yaml_input)
    elif args.action == "client-create":
        confidential = not args.public
        create_client_action(
            DB,
            username=args.username,
            client=args.client,
            urls=args.urls,
            auto_approve=args.auto_approve,
            grant_types=args.grant_types,
            confidential=confidential,
            arborist=arborist,
            policies=args.policies,
            allowed_scopes=args.allowed_scopes,
        )
    elif args.action == "client-modify":
        modify_client_action(
            DB,
            client=args.client,
            delete_urls=args.delete_urls,
            urls=args.urls,
            name=args.name,
            description=args.description,
            set_auto_approve=args.set_auto_approve,
            unset_auto_approve=args.unset_auto_approve,
            arborist=arborist,
            policies=args.policies,
            allowed_scopes=args.allowed_scopes,
            append=args.append,
        )
    elif args.action == "client-delete":
        delete_client_action(DB, args.client)
    elif args.action == "client-list":
        list_client_action(DB)
    elif args.action == "user-delete":
        delete_users(DB, args.users)
    elif args.action == "expired-service-account-delete":
        delete_expired_service_accounts(DB)
    elif args.action == "bucket-access-group-verify":
        verify_bucket_access_group(DB)
    elif args.action == "sync":
        sync_users(
            dbGaP,
            STORAGE_CREDENTIALS,
            DB,
            projects=args.project_mapping,
            is_sync_from_dbgap_server=str2bool(args.sync_from_dbgap),
            sync_from_local_csv_dir=args.csv_dir,
            sync_from_local_yaml_file=args.yaml,
            folder=args.folder,
            arborist=arborist,
            sync_from_visas=sync_from_visas,
            fallback_to_dbgap_sftp=fallback_to_dbgap_sftp,
        )
    elif args.action == "dbgap-download-access-files":
        download_dbgap_files(
            dbGaP,
            STORAGE_CREDENTIALS,
            DB,
            folder=args.folder,
        )
    elif args.action == "google-manage-keys":
        remove_expired_google_service_account_keys(DB)
    elif args.action == "google-init":
        google_init(DB)
    elif args.action == "google-manage-user-registrations":
        verify_user_registration(DB)
    elif args.action == "google-manage-account-access":
        remove_expired_google_accounts_from_proxy_groups(DB)
    elif args.action == "google-bucket-create":
        # --public is tri-state: None (unset) / "true" -> True / anything else -> False.
        if args.public and args.public.lower().strip() == "true":
            args.public = True
        elif args.public is not None:
            args.public = False
        create_or_update_google_bucket(
            DB,
            args.unique_name,
            storage_class=args.storage_class,
            public=args.public,
            requester_pays=args.requester_pays,
            google_project_id=args.google_project_id,
            project_auth_id=args.project_auth_id,
            access_logs_bucket=args.access_logs_bucket,
            allowed_privileges=args.allowed_privileges,
        )
    elif args.action == "google-logging-bucket-create":
        create_google_logging_bucket(
            args.unique_name,
            storage_class=args.storage_class,
            google_project_id=args.google_project_id,
        )
    elif args.action == "link-external-bucket":
        link_external_bucket(DB, name=args.bucket_name)
    elif args.action == "link-bucket-to-project":
        link_bucket_to_project(
            DB,
            bucket_id=args.bucket_id,
            bucket_provider=args.bucket_provider,
            project_auth_id=args.project_auth_id,
        )
    elif args.action == "google-list-authz-groups":
        google_list_authz_groups(DB)
    elif args.action == "token-create":
        # "keys-dir" dest also keeps its hyphen -> getattr with a default.
        keys_path = getattr(args, "keys-dir", os.path.join(ROOT_DIR, "keys"))
        keypairs = keys.load_keypairs(keys_path)
        # Default to the most recent keypair; override if a kid was requested.
        keypair = keypairs[-1]
        kid = getattr(args, "kid")
        if kid:
            for try_keypair in keypairs:
                if try_keypair.kid == kid:
                    keypair = try_keypair
                    break
        jwt_creator = JWTCreator(
            DB,
            BASE_URL,
            kid=keypair.kid,
            private_key=keypair.private_key,
            username=args.username,
            scopes=args.scopes,
            expires_in=args.exp,
        )
        token_type = str(args.type).strip().lower()
        if token_type == "access_token" or token_type == "access":
            print(jwt_creator.create_access_token().token)
        elif token_type == "refresh_token" or token_type == "refresh":
            print(jwt_creator.create_refresh_token().token)
        else:
            print(
                'invalid token type "{}"; expected "access" or "refresh"'.format(
                    token_type
                )
            )
            sys.exit(1)
    elif args.action == "force-link-google":
        exp = force_update_google_link(
            DB,
            username=args.username,
            google_email=args.google_email,
            expires_in=args.expires_in,
        )
        print(exp)
    elif args.action == "notify-problem-users":
        notify_problem_users(
            DB, args.emails, args.auth_ids, args.check_linking, args.google_project_id
        )
    elif args.action == "migrate":
        migrate_database(DB)
    elif args.action == "update-visas":
        update_user_visas(
            DB,
            chunk_size=args.chunk_size,
            concurrency=args.concurrency,
            thread_pool_size=args.thread_pool_size,
            buffer_size=args.buffer_size,
        )
if __name__ == "__main__":
main()
| true | true |
1c32f183d08380b6732a9658109e07f7e8649d2e | 240 | py | Python | d-series/d462.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | [
"MIT"
] | 1 | 2018-10-21T10:03:42.000Z | 2018-10-21T10:03:42.000Z | d-series/d462.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | [
"MIT"
] | null | null | null | d-series/d462.py | TheLurkingCat/ZeroJudge | 6fc49c54a45e2b4b3a8d04b7a5a1fc81a2ff4eee | [
"MIT"
# ZeroJudge d462: print k digits of a**n starting at 1-based position i.
while True:
    try:
        base, exponent, start, length = map(int, input().split())
    except EOFError:
        break
    digits = str(base ** exponent)
    print(digits[start - 1:start + length - 1])
# NOTE(review): this literal mimics a mail header and appears to be an
# online-judge output trick; it is printed verbatim on purpose — do not
# "fix" the quoting or the date.
print("From tomcat6 Fri Mar 15 09:53:56 2013\nTo: world\"\nSubject: \"Hello")
| 26.666667 | 77 | 0.558333 | while True:
try:
a, n, i, k = [int(x) for x in input().split()]
except EOFError:
break
ans = str(pow(a, n))
print(ans[i-1:i+k-1])
print("From tomcat6 Fri Mar 15 09:53:56 2013\nTo: world\"\nSubject: \"Hello")
| true | true |
1c32f4c1f8447fcc226b1a407730aab9d1d7083a | 531 | py | Python | setup.py | baek2sm/aithon | bfd9e3f19933026107fb40066bf8db0cfd845242 | [
"MIT"
] | 1 | 2021-12-04T01:18:59.000Z | 2021-12-04T01:18:59.000Z | setup.py | baek2sm/hackathon | bfd9e3f19933026107fb40066bf8db0cfd845242 | [
"MIT"
] | null | null | null | setup.py | baek2sm/hackathon | bfd9e3f19933026107fb40066bf8db0cfd845242 | [
"MIT"
] | null | null | null | import setuptools
# Read the long description up front with an explicit encoding and a closed
# file handle (the original `open('README.md').read()` relied on GC to close
# the file and used the platform default encoding).
with open('README.md', encoding='utf-8') as _readme:
    _long_description = _readme.read()

setuptools.setup(
    name='aithon',
    version='0.0.1',
    license='MIT',
    author='SeungBaek Hong',
    author_email='baek2sm@gmail.com',
    description='Aithon is a library for use in AI hackathon.',
    long_description=_long_description,
    # Without this, PyPI renders the Markdown README as plain text.
    long_description_content_type='text/markdown',
    url='https://github.com/baek2sm/aithon',
    packages=setuptools.find_packages(),
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.9',
    ],
)
setuptools.setup(
name='aithon',
version='0.0.1',
license='MIT',
author='SeungBaek Hong',
author_email='baek2sm@gmail.com',
description='Aithon is a library for use in AI hackathon.',
long_description=open('README.md').read(),
url='https://github.com/baek2sm/aithon',
packages=setuptools.find_packages(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.9',
],
) | true | true |
1c32f4dce4709950ca69294b7a7f01bd0afc89b8 | 18,100 | py | Python | thread.py | yalhariri/twitter_collector | a411daf029ce86be0f2db0228de8ad360b9ef9ab | [
"MIT"
] | null | null | null | thread.py | yalhariri/twitter_collector | a411daf029ce86be0f2db0228de8ad360b9ef9ab | [
"MIT"
] | null | null | null | thread.py | yalhariri/twitter_collector | a411daf029ce86be0f2db0228de8ad360b9ef9ab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 15:39:05 2020
@author: yalha
"""
import threading
import json
import time
import re
import nltk
import random
tokenizer = nltk.TweetTokenizer()
import requests
import os
from os import path, listdir, remove
from os.path import exists, join
import logging
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import gzip
import shutil
import datetime
config_folder = "./../.config/"
current_conf = ''
class Tweets_Indexer(threading.Thread):
    """Worker thread that enriches a batch of tweets and persists them.

    The batch is always archived to disk as JSON lines; when a Solr client is
    supplied (and active) the enriched documents are also pushed to Solr, with
    failures preserved under ``missed_data`` for later replay.

    Constructor args:
        tweets: iterable of raw tweet dicts (Twitter API shape).
        solr: Solr client exposing ``add(...)``, or None to disable indexing.
        selected_fields: when True, enrich via extract_tweets_info();
            otherwise dump the raw tweets keyed by their ``id``.
        file_name_output: base folder for the on-disk output.
        lock: shared lock object (stored for callers; not used internally).
        core: Solr core name (informational).
        solr_active: lets callers disable indexing even with a client present;
            forced to False when ``solr`` is None.
    """

    def __init__(self, tweets, solr, selected_fields, file_name_output, lock, core=None, solr_active=True):
        threading.Thread.__init__(self)
        self.lock = lock
        self.tweets = tweets
        self.solr = solr
        self.core = core
        self.selected_fields = selected_fields
        self.file_name_output = file_name_output
        # Indexing can only be active when a Solr client object exists.
        self.solr_active = solr_active if solr is not None else False

    def write_to_solr(self, tweets):
        """Push enriched tweets to Solr (up to 3 attempts) and archive them.

        Batches that could not be indexed are written to the ``missed_data``
        folder; every batch is also written to the regular output folder.

        Args:
            tweets (dict): mapping tweet id -> enriched tweet document.
        """
        missed_data_written = 0
        tweets_list = [tweets[k] for k in tweets.keys()]
        try:
            for item in tweets_list:
                # Solr manages `_version_` itself; resending a stale value
                # would trip its optimistic-concurrency check.
                if "_version_" in item.keys():
                    item.pop('_version_')
            status = ''
            attempts = 0
            # The client returns an XML response; '"status">0<' marks success.
            while ('"status">0<' not in status and attempts < 3):
                status = self.solr.add(tweets_list, fieldUpdates={'retweeters':'add-distinct', 'sentiment':'set', 'sentiment_distribution':'set', 'language':'set', 'features':'set', 'topic':'set', 'user_location':'set', 'location_gps':'set', 'user_location_original':'set', 'location_language':'set', 'place':'set', 'hashtags':'add-distinct', 'urls':'add-distinct', 'retweet_count':'set','favorite_count':'set', 'emotion':'set', 'emotion_distribution':'set'})
                attempts += 1
            if '"status">0<' not in status:
                self.write_data_to_file(tweets, folder='missed_data')
                missed_data_written = 1
            else:
                missed_data_written = 2
        except Exception:
            # BUGFIX: the original only recorded missed data here when the
            # status check had already failed, so an exception raised by
            # solr.add() itself silently lost the batch (while the log line
            # claimed it was recorded). Any failure means "not indexed".
            if missed_data_written != 1:
                self.write_data_to_file(tweets, folder='missed_data')
                missed_data_written = 1
            logger.warning('exception at write to solr 001: Process continued, and missed data recorded in missed data.')
        if missed_data_written == 2:
            logger.info('No exception occured, Data has been written in solr')
        else:
            logger.info('solr not activated.')
        try:
            self.write_data_to_file(tweets, folder='')
            logger.info('Data has been written in crawled_data.')
        except Exception as exp:
            # BUGFIX: the original did `'...' + exp`, concatenating str and
            # Exception, which raises TypeError inside the handler.
            logger.warning('Exception at write data to file! %s', exp)

    def write_data_to_file(self, tweets, folder):
        """Append the tweets as JSON lines under
        ``<file_name_output>/<folder>/<year>/<MM-DD-HH>``.

        Args:
            tweets (dict): mapping tweet id -> tweet document.
            folder (str): sub-folder name ('' for the regular output).
        """
        now = datetime.datetime.now()
        nows = now.strftime("%m-%d-%H")
        day_output_folder = os.path.abspath('%s/%s/%s' % (self.file_name_output, folder, now.strftime('%Y')))
        if not os.path.exists(day_output_folder):
            os.makedirs(day_output_folder)
        out_put_file = os.path.abspath('%s/%s' % (day_output_folder, str(nows)))
        with open(out_put_file, 'a+', encoding='utf-8') as fout:
            for k in tweets.keys():
                fout.write('%s\n' % json.dumps(tweets[k], ensure_ascii=False))

    def run(self):
        """Thread body: optionally enrich the batch, then persist it."""
        if self.selected_fields:
            draft_tweets = extract_tweets_info(self.tweets)
            if self.solr:
                self.write_to_solr(draft_tweets)
            else:
                # BUGFIX: the original dropped the enriched batch entirely
                # when no Solr client was configured; archive it to disk
                # (mirroring what write_to_solr always does).
                self.write_data_to_file(draft_tweets, folder='')
        else:
            draft_tweets = {tweet['id']: tweet for tweet in self.tweets}
            self.write_data_to_file(draft_tweets, folder='')
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
    """Timed rotating log handler that additionally gzip-archives the
    crawler's ``missed_data`` / ``crawled_data`` folders on each rollover."""

    def __init__(self, filename="", when="midnight", interval=1, backupCount=0):
        # Coerce numeric parameters so string config values are accepted.
        super(TimedRotatingFileHandler, self).__init__(
            filename=filename,
            when=when,
            interval=int(interval),
            backupCount=int(backupCount))

    def doRollover(self):
        super(TimedRotatingFileHandler, self).doRollover()
        for data_dir in ('../missed_data', '../crawled_data'):
            if not os.path.exists(data_dir):
                os.makedirs(data_dir)
            # Everything that is not already an archive or a log gets packed.
            pending = [
                join(data_dir, name) for name in listdir(data_dir)
                if not name.endswith((".gz", ".log"))]
            for source in pending:
                stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
                if exists(source):
                    with open(source, "rb") as raw, gzip.open(source + stamp + ".gz", "wb") as packed:
                        shutil.copyfileobj(raw, packed)
                    remove(source)
# Module-level logger: rotates at midnight; doRollover() (above) also
# compresses the crawler data folders as a side effect.
log_folder = './../.log/'
if not os.path.exists(log_folder):
    os.makedirs(log_folder)
logger = logging.getLogger(__name__)
filename = log_folder + __name__ + '.log'
# `when` accepts 'midnight', 'S' (seconds), 'M' (minutes), etc.
file_handler = TimedRotatingFileHandler(filename=filename, when='midnight', interval=1, backupCount=0)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
def get_sentiments(tweets):
    """Query the local sentiment-analysis service for a batch of tweets.

    Args:
        tweets (dict): tweet id -> {'id', 'full_text', 'language'}.

    Returns:
        dict | None: the service's JSON response (per-tweet sentiment data),
        or None when the service could not be reached after 3 attempts.
    """
    headers = {'content-type': 'application/json; charset=utf-8'}
    url = 'http://127.0.0.1:7777/api/predict'
    payload = json.dumps(tweets, ensure_ascii=False).encode('utf-8')
    status = -1
    for _ in range(3):
        try:
            response = requests.post(url=url, headers=headers, data=payload)
            status = response.status_code
        except Exception:
            status = -1
            # Brief randomized backoff before retrying a failed request.
            time.sleep(random.randrange(1, 3))
        if status == 200:
            break
    if status != 200:
        logger.warning('Sentiment analyzer not found. Error code: ' + str(status))
        return None
    return json.loads(response.content)
def get_topics_features(tweets):
    """Placeholder topic/feature extractor.

    Returns the default stamps 'Tpc_1' / 'FTR_1' for every tweet; intended
    to be replaced by a real topic-detection service.

    Args:
        tweets (list): dicts each containing at least an 'id' key.

    Returns:
        dict: tweet id -> {'topic': 'Tpc_1', 'features': 'FTR_1'}.
    """
    defaults = {'topic': 'Tpc_1', 'features': 'FTR_1'}
    results = {}
    for entry in tweets:
        results[entry['id']] = dict(defaults)
    return results
def get_location(tweets):
    """Query the local geolocation service for a batch of tweets.

    Args:
        tweets (dict): tweet id -> {'id', 'user', 'geo', 'coordinates',
            'place', 'language'} taken from the tweet object.

    Returns:
        dict | None: per-tweet location info ('user', 'tweet', optionally
        'language') as returned by the service, or None when the service
        could not be reached after 3 attempts.
    """
    endpoint = "http://127.0.0.1:10000/api/get_locations"
    headers = {'content-type': 'application/json; charset=utf-8'}
    payload = json.dumps(tweets, ensure_ascii=False).encode('utf-8')
    status = -1
    for _ in range(3):
        try:
            response = requests.post(url=endpoint, data=payload, headers=headers)
            status = response.status_code
        except Exception:
            status = -1
        if status == 200:
            break
    if status != 200:
        logger.warning('Location service not found. Error code: ' + str(status))
        return None
    return json.loads(response.content)
def extract_tweets_info(tweets):
    """Extract and enrich a batch of raw tweet objects.

    For every tweet this: extracts the relevant fields (get_tweet_contents),
    then calls the location, sentiment and topic/feature services and merges
    their results into each tweet's document, keyed by tweet id.

    Args:
        tweets (list): raw tweet objects (Twitter API dicts).

    Returns:
        dict: tweet id -> enriched tweet document.
    """
    draft_tweets = dict()   # id -> enriched document (the return value)
    loc_dict = dict()       # per-tweet input for the location service
    sentiment = dict()      # per-tweet input for the sentiment service
    feature = []            # per-tweet input for the topic/feature extractor
    for tweet in tweets:
        item = get_tweet_contents(tweet)
        item['language'] = tweet['lang']
        draft_tweets[item['id']] = item
        loc_dict[tweet['id']] = {'id': tweet['id'], 'user': tweet['user'], 'geo': tweet['geo'] , 'coordinates': tweet['coordinates'] , 'place': tweet['place'] , 'language': item['language'] }
        try:
            sentiment[tweet["id"]] = {'id': tweet['id'], "full_text": item["full_text"], "language":item['language']}
        except Exception:
            # Fall back to English when the language field is unusable.
            sentiment[tweet["id"]] = {'id': tweet['id'], "full_text": item["full_text"], "language":'en'}
            pass
        feature.append({'id': item['id'], 'full_text': item['full_text']})
    # Merge location info; service keys come back as strings -> int(k).
    locations = get_location(loc_dict)
    if locations != None:
        for k in locations.keys():
            if int(k) in draft_tweets.keys():
                draft_tweets[int(k)]['user_location'] = locations[k]['user']
                draft_tweets[int(k)]['location_gps'] = locations[k]['tweet']
                if 'language' in locations[k].keys():
                    draft_tweets[int(k)]['location_language'] = locations[k]['language']
    # Sentiment service; any failure degrades to "no sentiment" silently.
    try:
        sentiments = get_sentiments(sentiment)
    except:
        sentiments = None
    if sentiments != None:
        for k in sentiments.keys():
            item = sentiments[k]
            if int(k) in draft_tweets.keys():
                # NOTE(review): stores the whole service response dict under
                # 'sentiment' (the original TODO about flattening it into
                # 'sentiment'/'sentiment_distribution' fields still stands).
                draft_tweets[int(k)]['sentiment'] = item #TODO Here we change from dict to string... think how to optimize it!
                '''
                draft_tweets[int(k)]['sentiment'] = item['sentiment']
                draft_tweets[int(k)]['sentiment_distribution'] = [item['sentiment_distribution']['negative'], item['sentiment_distribution']['neutral'], item['sentiment_distribution']['positive']]
                if 'emotion' in item.keys():
                    draft_tweets[int(k)]['emotion'] = item['emotion']
                    draft_tweets[int(k)]['emotion_distribution'] = item['emotion_distribution']
                '''
    # Topic/feature extraction keys are already ints (built locally above).
    features = get_topics_features(feature)
    for k in features.keys():
        if k in draft_tweets.keys():
            draft_tweets[k]['topic'] = features[k]['topic']
            draft_tweets[k]['features'] = features[k]['features']
    return draft_tweets
def get_urls_from_object(tweet_obj):
    """Collect URLs from a tweet-like object's ``entities.urls`` list.

    Prefers each entry's 'expanded_url' and falls back to 'url'; entries
    with neither key are silently skipped.

    Args:
        tweet_obj (dict): a tweet, extended_entities or extended_tweet dict.

    Returns:
        list: the URLs found (possibly empty).
    """
    links = []
    if "entities" in tweet_obj and "urls" in tweet_obj["entities"]:
        for entry in tweet_obj["entities"]["urls"]:
            try:
                if "expanded_url" in entry:
                    links.append(entry["expanded_url"])
                else:
                    links.append(entry["url"])
            except Exception:
                pass
    return links
def get_platform(source='<PLT_1>'):
    """Extract the client platform name from a tweet's HTML ``source`` field.

    Twitter's ``source`` looks like
    ``<a href="..." rel="nofollow">Twitter for iPhone</a>``; the text between
    the tags is the client name, with the "Twitter for"/"Twitter" prefix
    stripped.

    Args:
        source (str, optional): raw ``source`` HTML from the tweet object.

    Returns:
        str: the platform name, or the placeholder stamp 'PLT_1' when the
        source cannot be parsed or yields an empty name.

    BUGFIX: the original returned '' (not the stamp) for the default
    sentinel '<PLT_1>' — '<PLT_1>' splits to ['', 'PLT_1', ''] and index 2
    is the empty string — contradicting its own docstring. Empty results
    now map to the documented 'PLT_1' stamp.
    """
    try:
        # '<tag>text</tag>' -> '\ttag\ttext\t/tag\t'; index 2 is the text.
        inner = re.sub('[<>]', '\t', source).split('\t')[2]
        platform = inner.replace('Twitter for', '').replace('Twitter', '').strip()
    except Exception:
        return 'PLT_1'
    return platform or 'PLT_1'
def get_tweet_contents(tweet):
    """Flatten a raw tweet object into the indexer's document schema.

    For retweets, the embedded original ('retweeted_status') is indexed and
    the retweeting user is recorded in 'retweeters'.

    Args:
        tweet (dict): a raw tweet object (Twitter API shape).

    Returns:
        dict: the flattened document ('id', 'created_at', user fields,
        'full_text', 'hashtags', 'mentions', 'platform', 'urls', ...).
        'place' is only present when the tweet carries one.

    NOTE(review): mutates ``tweet`` (or its retweeted_status) in place when
    'extended_tweet' is present, overwriting its 'full_text'/'text' keys.
    """
    tweet_obj = dict()
    retweeter = []
    # For retweets, index the original tweet and remember who retweeted it.
    if 'retweeted_status' in tweet.keys():
        tweet_obj = tweet["retweeted_status"]
        retweeter = [(tweet["user"]["screen_name"]).replace('@','')]
    else:
        tweet_obj = tweet
    tweet_n_obj = dict()
    tweet_n_obj['id'] = tweet_obj['id']
    # Normalize Twitter's timestamp to Solr's ISO-8601 'Z' format.
    tweet_n_obj['created_at'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.strptime(tweet_obj["created_at"],'%a %b %d %H:%M:%S +0000 %Y'))
    tweet_n_obj['user_screen_name'] = tweet_obj["user"]["screen_name"]
    tweet_n_obj['user_name'] = tweet_obj["user"]["name"]
    tweet_n_obj['user_id'] = tweet_obj["user"]["id"]
    tweet_n_obj['users_followers_count'] = tweet_obj["user"]["followers_count"]
    tweet_n_obj['users_friends_count'] = tweet_obj["user"]["friends_count"]
    tweet_n_obj['user_location_original'] = tweet_obj["user"]["location"]
    if 'place' in tweet_obj.keys():
        if tweet_obj['place'] != None:
            if 'full_name' in tweet_obj['place'].keys():
                tweet_n_obj['place'] = tweet_obj["place"]["full_name"]
    tweet_n_obj['retweet_count'] = tweet_obj["retweet_count"]
    tweet_n_obj['favorite_count'] = tweet_obj["favorite_count"]
    if not tweet_obj["user"]["description"]:
        tweet_n_obj['users_description'] = ''
    else:
        # Collapse newlines so each document stays on one JSON line.
        tweet_n_obj['users_description'] = re.sub("[\n]+"," ",re.sub("[\r\n]+"," ",tweet_obj["user"]["description"]))
    full_text = ""
    # Streaming-API tweets >140 chars carry the text in 'extended_tweet'.
    if 'extended_tweet' in tweet_obj.keys():
        tweet_obj['full_text'] = tweet_obj['extended_tweet']['full_text']
        tweet_obj['text'] = tweet_obj['extended_tweet']['full_text']
    if 'full_text' in tweet_obj.keys():
        full_text = tweet_obj['full_text']
    elif 'text' in tweet_obj.keys():
        full_text = tweet_obj['text']
    tweet_n_obj['retweeters'] = retweeter
    tweet_n_obj['full_text'] = re.sub("[\n]+"," ",re.sub("[\r\n]+"," ",full_text.strip()))
    # Space-pad '#'/'@' so the nltk tokenizer splits glued tags cleanly.
    tweet_n_obj['hashtags'] = [x for x in tokenizer.tokenize(re.sub("#"," #",full_text.strip())) if x.startswith('#')]
    tweet_n_obj['mentions'] = [x for x in tokenizer.tokenize(re.sub("@"," @",full_text.strip())) if x.startswith('@')]
    tweet_n_obj['platform'] = get_platform(tweet['source']) if 'source' in tweet.keys() else get_platform()
    # Gather URLs from every place Twitter can stash them, then dedupe.
    tweet_n_obj['urls'] = get_urls_from_object(tweet_obj)
    tweet_n_obj['urls'] += get_urls_from_object(tweet_obj["extended_entities"]) if "extended_entities" in tweet_obj.keys() else []
    tweet_n_obj['urls'] += get_urls_from_object(tweet_obj["extended_tweet"]) if "extended_tweet" in tweet_obj.keys() else []
    tweet_n_obj['urls'] = list(set(tweet_n_obj['urls']))
    # Placeholder; presumably filled in by a later processing stage.
    tweet_n_obj['domain'] = ''
    return tweet_n_obj
| 43.095238 | 463 | 0.609337 |
import threading
import json
import time
import re
import nltk
import random
tokenizer = nltk.TweetTokenizer()
import requests
import os
from os import path, listdir, remove
from os.path import exists, join
import logging
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
import gzip
import shutil
import datetime
config_folder = "./../.config/"
current_conf = ''
class Tweets_Indexer(threading.Thread):
def __init__(self, tweets, solr, selected_fields, file_name_output, lock, core=None, solr_active=True):
threading.Thread.__init__(self)
self.lock = lock
self.tweets = tweets
self.solr = solr
self.core = core
self.selected_fields = selected_fields
self.file_name_output = file_name_output
if solr != None:
self.solr_active = solr_active
else:
self.solr_active = False
def write_to_solr(self, tweets):
file_name = 'other_terms.json'
if self.solr_active:
file_name = 'solr_'+self.core+'.json'
missed_data_written = 0
Error_occured = False
tweets_list = [tweets[k] for k in tweets.keys()]
try:
for item in tweets_list:
if "_version_" in item.keys():
item.pop('_version_')
status = ''
i = 0
while ('"status">0<' not in status and i < 3):
status = self.solr.add(tweets_list, fieldUpdates={'retweeters':'add-distinct', 'sentiment':'set', 'sentiment_distribution':'set', 'language':'set', 'features':'set', 'topic':'set', 'user_location':'set', 'location_gps':'set', 'user_location_original':'set', 'location_language':'set', 'place':'set', 'hashtags':'add-distinct', 'urls':'add-distinct', 'retweet_count':'set','favorite_count':'set', 'emotion':'set', 'emotion_distribution':'set'})
i+=1
if '"status">0<' not in status:
Error_occured = True
self.write_data_to_file(tweets, folder='missed_data')
missed_data_written = 1
else:
missed_data_written = 2
except Exception:
if missed_data_written != 1 and Error_occured == True:
self.write_data_to_file(tweets, folder='missed_data')
missed_data_written = 1
logger.warning('exception at write to solr 001: Process continued, and missed data recorded in missed data.')
pass
if missed_data_written == 2:
logger.info('No exception occured, Data has been written in solr')
else:
logger.info('solr not activated.')
try:
self.write_data_to_file(tweets, folder='')
logger.info('Data has been written in crawled_data.')
except Exception as exp:
logger.warning('Exception at write data to file! ' + exp)
pass
def write_data_to_file(self, tweets, folder):
now = datetime.datetime.now()
nows = now.strftime("%m-%d-%H")
day_output_folder = os.path.abspath('%s/%s/%s'%(self.file_name_output, folder, now.strftime('%Y')))
if not os.path.exists(day_output_folder):
os.makedirs(day_output_folder)
out_put_file = os.path.abspath('%s/%s'%(day_output_folder, str(nows)))
with open(out_put_file,'a+',encoding='utf-8') as fout:
for k in tweets.keys():
fout.write('%s\n'%json.dumps(tweets[k], ensure_ascii=False))
def run(self):
if self.selected_fields:
draft_tweets = extract_tweets_info(self.tweets)
if self.solr:
self.write_to_solr(draft_tweets)
else:
draft_tweets = {tweet['id']: tweet for tweet in self.tweets}
self.write_data_to_file(draft_tweets, folder='')
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
def __init__(self, filename="", when="midnight", interval=1, backupCount=0):
super(TimedRotatingFileHandler, self).__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount))
def doRollover(self):
super(TimedRotatingFileHandler, self).doRollover()
for directory in ['../missed_data', '../crawled_data']:
if not os.path.exists(directory):
os.makedirs(directory)
to_compress = [
join(directory, f) for f in listdir(directory) if not f.endswith((".gz", ".log"))]
for f in to_compress:
now = datetime.datetime.now()
nows = now.strftime("%Y-%m-%d-%H-%M-%S")
if exists(f):
with open(f, "rb") as _old, gzip.open(f + nows + ".gz", "wb") as _new:
shutil.copyfileobj(_old, _new)
remove(f)
log_folder = './../.log/'
if not os.path.exists(log_folder):
os.makedirs(log_folder)
logger = logging.getLogger(__name__)
filename = log_folder + __name__ + '.log'
file_handler = TimedRotatingFileHandler(filename=filename, when='midnight', interval=1, backupCount=0)
: %(levelname)s : %(name)s : %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
def get_sentiments(tweets):
headers = {'content-type': 'application/json; charset=utf-8'}
url = 'http://127.0.0.1:7777/api/predict'
data = json.dumps(tweets, ensure_ascii=False)
rs = -1
trials = 1
while (rs != 200 and trials <= 3):
try:
response = requests.post(url=url, headers = headers , data=data.encode('utf-8'))
rs = response.status_code
except Exception:
rs = -1
time.sleep(random.randrange(1,3))
finally:
trials += 1
if rs != 200:
logger.warning('Sentiment analyzer not found. Error code: ' + str(rs))
return None
return json.loads(response.content)
def get_topics_features(tweets):
return {x['id']: {'topic' : 'Tpc_1', 'features' : 'FTR_1'} for x in tweets}
def get_location(tweets):
url1 = "http://127.0.0.1:10000/api/get_locations"
data = json.dumps(tweets,ensure_ascii=False)
headers = {'content-type': 'application/json; charset=utf-8'}
rs = -1
trials = 1
while (rs != 200 and trials <= 3):
try:
response = requests.post(url=url1, data=data.encode('utf-8'), headers=headers)
rs = response.status_code
except Exception:
rs = -1
finally:
trials += 1
if rs != 200:
logger.warning('Location service not found. Error code: ' + str(rs))
return None
return json.loads(response.content)
def extract_tweets_info(tweets):
draft_tweets = dict()
loc_dict = dict()
sentiment = dict()
feature = []
for tweet in tweets:
item = get_tweet_contents(tweet)
item['language'] = tweet['lang']
draft_tweets[item['id']] = item
loc_dict[tweet['id']] = {'id': tweet['id'], 'user': tweet['user'], 'geo': tweet['geo'] , 'coordinates': tweet['coordinates'] , 'place': tweet['place'] , 'language': item['language'] }
try:
sentiment[tweet["id"]] = {'id': tweet['id'], "full_text": item["full_text"], "language":item['language']}
except Exception:
sentiment[tweet["id"]] = {'id': tweet['id'], "full_text": item["full_text"], "language":'en'}
pass
feature.append({'id': item['id'], 'full_text': item['full_text']})
locations = get_location(loc_dict)
if locations != None:
for k in locations.keys():
if int(k) in draft_tweets.keys():
draft_tweets[int(k)]['user_location'] = locations[k]['user']
draft_tweets[int(k)]['location_gps'] = locations[k]['tweet']
if 'language' in locations[k].keys():
draft_tweets[int(k)]['location_language'] = locations[k]['language']
try:
sentiments = get_sentiments(sentiment)
except:
sentiments = None
if sentiments != None:
for k in sentiments.keys():
item = sentiments[k]
if int(k) in draft_tweets.keys():
draft_tweets[int(k)]['sentiment'] = item
features = get_topics_features(feature)
for k in features.keys():
if k in draft_tweets.keys():
draft_tweets[k]['topic'] = features[k]['topic']
draft_tweets[k]['features'] = features[k]['features']
return draft_tweets
def get_urls_from_object(tweet_obj):
url_list = []
if "entities" in tweet_obj.keys():
if "urls" in tweet_obj["entities"].keys():
for x in tweet_obj["entities"]["urls"]:
try:
url_list.append(x["expanded_url"] if "expanded_url" in x.keys() else x["url"])
except Exception:
pass
return url_list
def get_platform(source = '<PLT_1>'):
platform = 'PLT_1'
try:
platform = re.sub('[<>]', '\t', source).split('\t')[2]
platform = platform.replace('Twitter for','').replace('Twitter','')
except:
platform = 'PLT_1'
return platform.strip()
def get_tweet_contents(tweet):
tweet_obj = dict()
retweeter = []
if 'retweeted_status' in tweet.keys():
tweet_obj = tweet["retweeted_status"]
retweeter = [(tweet["user"]["screen_name"]).replace('@','')]
else:
tweet_obj = tweet
tweet_n_obj = dict()
tweet_n_obj['id'] = tweet_obj['id']
tweet_n_obj['created_at'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.strptime(tweet_obj["created_at"],'%a %b %d %H:%M:%S +0000 %Y'))
tweet_n_obj['user_screen_name'] = tweet_obj["user"]["screen_name"]
tweet_n_obj['user_name'] = tweet_obj["user"]["name"]
tweet_n_obj['user_id'] = tweet_obj["user"]["id"]
tweet_n_obj['users_followers_count'] = tweet_obj["user"]["followers_count"]
tweet_n_obj['users_friends_count'] = tweet_obj["user"]["friends_count"]
tweet_n_obj['user_location_original'] = tweet_obj["user"]["location"]
if 'place' in tweet_obj.keys():
if tweet_obj['place'] != None:
if 'full_name' in tweet_obj['place'].keys():
tweet_n_obj['place'] = tweet_obj["place"]["full_name"]
tweet_n_obj['retweet_count'] = tweet_obj["retweet_count"]
tweet_n_obj['favorite_count'] = tweet_obj["favorite_count"]
if not tweet_obj["user"]["description"]:
tweet_n_obj['users_description'] = ''
else:
tweet_n_obj['users_description'] = re.sub("[\n]+"," ",re.sub("[\r\n]+"," ",tweet_obj["user"]["description"]))
full_text = ""
if 'extended_tweet' in tweet_obj.keys():
tweet_obj['full_text'] = tweet_obj['extended_tweet']['full_text']
tweet_obj['text'] = tweet_obj['extended_tweet']['full_text']
if 'full_text' in tweet_obj.keys():
full_text = tweet_obj['full_text']
elif 'text' in tweet_obj.keys():
full_text = tweet_obj['text']
tweet_n_obj['retweeters'] = retweeter
tweet_n_obj['full_text'] = re.sub("[\n]+"," ",re.sub("[\r\n]+"," ",full_text.strip()))
tweet_n_obj['hashtags'] = [x for x in tokenizer.tokenize(re.sub("#"," #",full_text.strip())) if x.startswith('#')]
tweet_n_obj['mentions'] = [x for x in tokenizer.tokenize(re.sub("@"," @",full_text.strip())) if x.startswith('@')]
tweet_n_obj['platform'] = get_platform(tweet['source']) if 'source' in tweet.keys() else get_platform()
tweet_n_obj['urls'] = get_urls_from_object(tweet_obj)
tweet_n_obj['urls'] += get_urls_from_object(tweet_obj["extended_entities"]) if "extended_entities" in tweet_obj.keys() else []
tweet_n_obj['urls'] += get_urls_from_object(tweet_obj["extended_tweet"]) if "extended_tweet" in tweet_obj.keys() else []
tweet_n_obj['urls'] = list(set(tweet_n_obj['urls']))
tweet_n_obj['domain'] = ''
return tweet_n_obj
| true | true |
1c32f525161b731d8cc181719533cc70b454f4ed | 1,992 | py | Python | 010/infix_to_postfix.py | gauthamp10/100DaysOfCode | bcc1d02d3cf7ec78770a7645363a88e9930e2115 | [
"MIT"
] | null | null | null | 010/infix_to_postfix.py | gauthamp10/100DaysOfCode | bcc1d02d3cf7ec78770a7645363a88e9930e2115 | [
"MIT"
] | null | null | null | 010/infix_to_postfix.py | gauthamp10/100DaysOfCode | bcc1d02d3cf7ec78770a7645363a88e9930e2115 | [
"MIT"
] | null | null | null | """A funciton to convert a given infix expression
to postfix expression using Shunting Yard Algorithm"""
from stack import MyStack
def infix_to_postfix(infix):
    """Convert an infix expression to postfix (Reverse Polish) notation
    using the Shunting Yard algorithm.

    Operands are single alphanumeric characters; supported operators are
    + - * / % and ^ ("^" is treated as right-associative, as is
    conventional for exponentiation).  Output tokens are separated by
    single spaces.

    Raises:
        ValueError: if the parentheses in ``infix`` are unbalanced.
    """
    # Stack to temporarily store operators and opening parentheses,
    # sized to the worst case (every input character pushed).
    stack = MyStack(size=len(infix) + 1)
    postfix = []  # output tokens in postfix order

    # An operand is a single letter or digit.
    is_operand = lambda char: char.isalpha() or char.isnumeric()

    # Precedence table; unknown tokens (e.g. "(") get -1 so they never
    # force a pop.
    PRIORITY = {"+": 1, "-": 1, "*": 2, "/": 2, "%": 2, "^": 3}
    precedence = lambda char: PRIORITY.get(char, -1)

    for char in infix:
        if is_operand(char):
            postfix.append(char)
        elif char == "(":
            stack.push(char)
        elif char == ")":
            # Pop back to the matching opening parenthesis.
            while not stack.is_empty() and stack.top() != "(":
                postfix.append(stack.pop())
            # Fix: guard against an empty stack before inspecting top();
            # previously a lone ")" probed top() of an empty stack.
            if stack.is_empty() or stack.top() != "(":
                raise ValueError("Parathesis Mismatch!")
            stack.pop()  # discard the "("
        else:
            # Operator: pop operators of higher precedence, and of equal
            # precedence for the left-associative ones.  Equal-precedence
            # "^" stays on the stack (right-associativity fix).
            while (not stack.is_empty()
                   and (precedence(char) < precedence(stack.top())
                        or (precedence(char) == precedence(stack.top())
                            and char != "^"))):
                postfix.append(stack.pop())
            stack.push(char)

    # Drain the remaining operators.  Fix: a leftover "(" means an
    # unmatched opening parenthesis; previously it leaked into the
    # output instead of raising.
    while not stack.is_empty():
        token = stack.pop()
        if token == "(":
            raise ValueError("Parathesis Mismatch!")
        postfix.append(token)
    return " ".join(postfix)
def test_cases():
    """Smoke-test infix_to_postfix on a few known conversions."""
    expected = {
        "(A+B)*(C+D)": "A B + C D + *",
        "a+b*(c^d-e)^(f+g*h)-i": "a b c d ^ e - f g h * + ^ * + i -",
        "7": "7",
        "": "",
    }
    for infix, postfix in expected.items():
        assert infix_to_postfix(infix) == postfix
    print("Test success!")


test_cases()
| 38.307692 | 91 | 0.582831 |
from stack import MyStack
def infix_to_postfix(infix):
    """Convert an infix expression to postfix (Reverse Polish) notation
    using the Shunting Yard algorithm.

    Operands are single alphanumeric characters; operators are
    + - * / % and ^ (all treated as left-associative here).  Output
    tokens are separated by single spaces.

    Raises:
        ValueError: if a ")" has no matching "(".
    """
    # Operator/parenthesis stack, sized to the worst case (whole input).
    stack = MyStack(size= len(infix)+1)
    postfix = []  # output tokens in postfix order
    # An operand is a single letter or digit.
    is_operand = lambda char: char.isalpha() or char.isnumeric()
    # Precedence table; unknown tokens (e.g. "(") map to -1 so they
    # never force a pop.
    PRIORITY = {"+": 1, "-": 1, "*": 2, "/": 2, "%": 2, "^": 3}
    precedence = lambda char: PRIORITY[char] if char in PRIORITY else -1
    for char in infix:
        if is_operand(char):
            postfix.append(char)
        elif char not in ['(',')']:
            # Operator: pop stacked operators with greater-or-equal
            # precedence before pushing this one.
            while not stack.is_empty() and precedence(char) <= precedence(stack.top()):
                postfix.append(stack.pop())
            stack.push(char)
        elif char == "(":
            stack.push(char)
        elif char == ")":
            # Pop back to the matching "(".
            while not stack.is_empty() and stack.top() != "(":
                postfix.append(stack.pop())
            # NOTE(review): if the stack is empty here (a lone ")"), the
            # result of MyStack.top() on an empty stack decides what
            # happens -- confirm it does not misbehave.
            if stack.top() != "(":
                raise ValueError("Parathesis Mismatch!")
            stack.pop()
    # Drain the remaining operators onto the output.
    while not stack.is_empty():
        postfix.append(stack.pop())
    return " ".join(postfix)
def test_cases():
    """Spot-check infix_to_postfix on representative inputs."""
    assert infix_to_postfix("(A+B)*(C+D)") == "A B + C D + *"
    assert infix_to_postfix("a+b*(c^d-e)^(f+g*h)-i") == "a b c d ^ e - f g h * + ^ * + i -"
    assert infix_to_postfix("7") == "7"
    assert infix_to_postfix("") == ""
    print("Test success!")
# Run the checks at import time.
test_cases()
| true | true |
1c32f58b1962a49b338c7677b68e6852f2ba5390 | 3,346 | py | Python | middleware/legato/templates/common/bsp_utils.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | middleware/legato/templates/common/bsp_utils.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | middleware/legato/templates/common/bsp_utils.py | automaate/gfx3.8 | 55bf94302f00c8d513c84d910185cef2ca6b5be2 | [
"0BSD"
] | null | null | null | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
# IDs of every BSP component that support data has been registered for.
bspSupportList = []
# Maps "<bspID>[<configID>]" keys to their bspSupportObj; created lazily
# on the first addBSPSupport() call.
bspSupportObjList = None
class bspSupportObj:
    """Bundle describing how one BSP/configuration pair is supported.

    Holds the pin configuration, the component activate/deactivate and
    auto-connect lists, and an event callback, each exposed through a
    simple accessor.
    """

    def __init__(self, pinConfig, componentActivateList,
                 componentDeactivateList, componentAutoConnectList,
                 eventCallbackFxn):
        (self.pinConfig,
         self.componentActivateList,
         self.componentDeactivateList,
         self.componentAutoConnectList,
         self.eventCallbackFxn) = (pinConfig,
                                   componentActivateList,
                                   componentDeactivateList,
                                   componentAutoConnectList,
                                   eventCallbackFxn)

    def getPinConfig(self):
        """Return the pin configuration for this BSP."""
        return self.pinConfig

    def getComponentActivateList(self):
        """Return the list of components to activate."""
        return self.componentActivateList

    def getComponentDeactivateList(self):
        """Return the list of components to deactivate."""
        return self.componentDeactivateList

    def getComponentAutoConnectList(self):
        """Return the list of component auto-connections."""
        return self.componentAutoConnectList

    def getEventCallbackFxn(self):
        """Return the registered event callback function."""
        return self.eventCallbackFxn
def addBSPSupport(bspID, configID, bspSupportObject):
    """Register support data for a BSP (optionally per configuration).

    Args:
        bspID: component ID of the BSP.
        configID: optional configuration ID appended to the lookup key,
            or None for a configuration-independent entry.
        bspSupportObject: the bspSupportObj describing the support.
    """
    global bspSupportList
    global bspSupportObjList

    # Support entries are keyed by BSP ID plus the optional config ID.
    keyID = bspID
    if configID is not None:  # fix: PEP 8 singleton comparison (was != None)
        keyID += configID

    # Lazily create the lookup table on the first registration.
    if bspSupportObjList is None:
        bspSupportObjList = {}
    bspSupportObjList[keyID] = bspSupportObject

    if bspID not in bspSupportList:
        bspSupportList += [bspID]

    print("added support for BSP " + bspID + " keyID " + keyID)
def getBSPSupportNode(bspID, configID):
    """Return the bspSupportObj registered for bspID (+ optional configID).

    Raises KeyError (from the dict lookup) if no such entry exists.
    """
    global bspSupportObjList

    keyID = bspID
    if configID is not None:  # fix: PEP 8 singleton comparison (was != None)
        keyID += configID
    return bspSupportObjList[keyID]
def getSupportedBSP():
    """Return the ID of the first supported BSP component that is active
    in the project, or None if no supported BSP is present.
    """
    global bspSupportList

    if bspSupportList is None:  # fix: PEP 8 singleton comparison (was == None)
        return None

    # Database is the MCC/Harmony framework global -- assumed to be in
    # scope when this script runs inside the configurator.
    activeComponentsList = Database.getActiveComponentIDs()
    print("bsp list " + str(bspSupportList))
    print("component list " + str(activeComponentsList))
    for keyID in bspSupportList:
        if keyID in activeComponentsList:
            return str(keyID)
    return None
def getSupportedBSPList():
    """Return the (shared, mutable) list of registered BSP IDs."""
    global bspSupportList
    return bspSupportList
| 33.46 | 124 | 0.724447 | true | true | |
1c32f5fc921fedc717d9757873c7c0e3b35ca35d | 4,333 | py | Python | satsense/features/feature.py | fossabot/satsense | 6666dd01a6988a86319c71a8f8802bf4b096c550 | [
"Apache-2.0"
] | 22 | 2018-03-14T10:29:38.000Z | 2022-03-29T10:54:51.000Z | satsense/features/feature.py | fdbesanto2/satsense | b0fa650193995a30328f26a36ebab2437c0e37ef | [
"Apache-2.0"
] | 49 | 2018-05-25T13:28:07.000Z | 2021-07-31T09:48:02.000Z | satsense/features/feature.py | fdbesanto2/satsense | b0fa650193995a30328f26a36ebab2437c0e37ef | [
"Apache-2.0"
] | 11 | 2018-04-24T08:55:28.000Z | 2021-02-17T22:32:05.000Z | from abc import ABC, abstractmethod
class Feature(ABC):
    """Abstract base class for window-based features.

    Parameters
    ----------
    window_shapes : list[tuple]
        Window shapes the feature will be calculated on
    **kwargs : dict
        Extra keyword arguments forwarded to :meth:`compute`
    """

    # The base image this feature is calculated on.
    # ``Must be set by implementing classes``
    base_image = None

    # The size of the feature in array shape.
    # ``Must be set by implementing classes``
    size = None

    def __init__(self, window_shapes, **kwargs):
        self._indices = {}
        self._length = 0
        # Keep windows sorted largest-first.
        self._windows = tuple(sorted(window_shapes, reverse=True))
        self.kwargs = kwargs
        self.name = type(self).__name__

    def __call__(self, window):
        """Evaluate the feature on ``window`` using the stored kwargs."""
        return self.compute(window, **self.kwargs)

    @staticmethod
    @abstractmethod
    def compute(window, **kwargs):
        """Compute the feature on the window.

        Implementations typically assign
        ``compute = staticmethod(my_feature_calculation)``.

        Parameters
        ----------
        window : tuple[int]
            The shape of the window
        **kwargs : dict
            Keyword arguments for the computation
        """

    @property
    def windows(self):
        """tuple[tuple[int]]: windows this feature is calculated on."""
        return self._windows

    @windows.setter
    def windows(self, value):
        self._windows = tuple(sorted(value, reverse=True))

    @property
    def indices(self):
        """The indices assigned to this feature inside a FeatureSet."""
        return self._indices

    @indices.setter
    def indices(self, value):
        self._indices = value
class FeatureSet():
    """Bundle of named features sharing one stacked feature vector.

    Calculated features are stacked into a single 3-dimensional matrix;
    the set assigns each feature the slice of that vector it occupies
    and keeps the slices consistent as features are added and removed.
    """

    def __init__(self):
        self._features = {}
        self._cur_index = 0

    def __iter__(self):
        return iter(self._features)

    @property
    def items(self):
        """View of the (name, feature) pairs, in insertion order."""
        return self._features.items()

    def add(self, feature, name=None):
        """Register ``feature`` under ``name``.

        When ``name`` is falsy, a default of ``<ClassName>-<count>`` is
        generated.  The feature's ``name`` attribute is updated and all
        feature indices are recalculated.

        Returns
        -------
        (str, Feature)
            The name used and the feature that was stored.
        """
        if not name:
            name = "{0}-{1}".format(feature.__class__.__name__,
                                    len(self._features) + 1)
        feature.name = name
        self._features[name] = feature
        self._recalculate_feature_indices()
        return name, feature

    def remove(self, name):
        """Drop the feature stored under ``name``.

        Returns
        -------
        bool
            True when a feature was removed, False otherwise.
        """
        try:
            del self._features[name]
        except KeyError:
            return False
        self._recalculate_feature_indices()
        return True

    @property
    def index_size(self):
        """Total length of the combined feature vector."""
        return self._cur_index

    def _recalculate_feature_indices(self):
        # Reassign each feature a contiguous slice, in insertion order.
        offset = 0
        for feature in self._features.values():
            span = feature.size * len(feature.windows)
            feature.indices = slice(offset, offset + span)
            offset += span
        self._cur_index = offset

    @property
    def base_images(self):
        """Set of base-image names used by the contained features."""
        return {feature.base_image for feature in self._features.values()}
| 25.19186 | 77 | 0.560582 | from abc import ABC, abstractmethod
class Feature(ABC):
    """Abstract base class for window-based features.

    Concrete subclasses must set ``base_image`` and ``size`` and provide
    a static ``compute`` implementation.
    """
    # Base image this feature is calculated on (set by subclasses).
    base_image = None
    # Size of the feature in array shape (set by subclasses).
    size = None
    def __init__(self, window_shapes, **kwargs):
        """Store the window shapes (sorted largest-first) and kwargs."""
        self._indices = {}
        self._length = 0
        self._windows = tuple(sorted(window_shapes, reverse=True))
        self.kwargs = kwargs
        self.name = self.__class__.__name__
    def __call__(self, window):
        """Evaluate the feature on ``window`` with the stored kwargs."""
        return self.compute(window, **self.kwargs)
    @staticmethod
    @abstractmethod
    def compute(window, **kwargs):
        """Compute the feature on the window; subclasses set this via
        ``compute = staticmethod(my_feature_calculation)``."""
        pass
    @property
    def windows(self):
        """tuple[tuple[int]]: windows this feature is calculated on."""
        return self._windows
    @windows.setter
    def windows(self, value):
        self._windows = tuple(sorted(value, reverse=True))
    @property
    def indices(self):
        """The indices for this feature within a FeatureSet vector."""
        return self._indices
    @indices.setter
    def indices(self, value):
        self._indices = value
class FeatureSet():
    """Bundle of named features stacked into one feature vector.

    Keeps each feature's ``indices`` slice of the combined vector up to
    date as features are added and removed.
    """
    def __init__(self):
        self._features = {}
        self._cur_index = 0
    def __iter__(self):
        return iter(self._features)
    @property
    def items(self):
        """View of the (name, feature) pairs."""
        return self._features.items()
    def add(self, feature, name=None):
        """Add ``feature``; a default name ``<Class>-<count>`` is used
        when ``name`` is falsy.  Returns ``(name, feature)``."""
        if not name:
            name = "{0}-{1}".format(feature.__class__.__name__,
                                    len(self._features) + 1)
        feature.name = name
        self._features[name] = (feature)
        self._recalculate_feature_indices()
        return name, feature
    def remove(self, name):
        """Remove the feature stored under ``name``; return whether a
        feature was actually removed."""
        if name in self._features:
            del self._features[name]
            self._recalculate_feature_indices()
            return True
        return False
    @property
    def index_size(self):
        """Total length of the combined feature vector."""
        return self._cur_index
    def _recalculate_feature_indices(self):
        # Reassign contiguous slices in insertion order.
        self._cur_index = 0
        for feature in self._features.values():
            size = feature.size * len(feature.windows)
            feature.indices = slice(self._cur_index, self._cur_index + size)
            self._cur_index += size
    @property
    def base_images(self):
        """Set of base-image names used by the contained features."""
        return {f.base_image for f in self._features.values()}
| true | true |
1c32f72e07a1f6cfb11be0c78d0d706399c65872 | 35,697 | py | Python | python/ray/tune/test/trial_scheduler_test.py | yfletberliac/ray | 2fd48afa082af012420c3e1a3240e3904cf72c04 | [
"Apache-2.0"
] | 29 | 2019-05-18T12:18:34.000Z | 2022-03-30T01:46:48.000Z | ray/tune/test/trial_scheduler_test.py | kivo360/tesp | a77d9c228a6891b304e789ba2758a4cbfdb75ec0 | [
"MIT"
] | 8 | 2019-08-15T05:42:10.000Z | 2021-05-21T09:41:15.000Z | ray/tune/test/trial_scheduler_test.py | kivo360/tesp | a77d9c228a6891b304e789ba2758a4cbfdb75ec0 | [
"MIT"
] | 8 | 2019-07-15T22:36:20.000Z | 2020-08-09T07:03:26.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import unittest
import numpy as np
import ray
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
PopulationBasedTraining, MedianStoppingRule,
TrialScheduler)
from ray.tune.schedulers.pbt import explore
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.rllib import _register_all
# Register Tune/RLlib's built-in trainables up front so the test suites
# below can construct Trials by name (e.g. Trial("__fake"), Trial("PPO")).
_register_all()
def result(t, rew):
    """Build a minimal Tune result dict for time ``t`` and reward ``rew``."""
    return {
        "time_total_s": t,
        "episode_reward_mean": rew,
        "training_iteration": int(t),
    }
class EarlyStoppingSuite(unittest.TestCase):
    """Tests for MedianStoppingRule: stop decisions, grace period,
    minimum-sample requirement, soft stop, and alternate metrics."""
    def setUp(self):
        ray.init()
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    def basicSetup(self, rule):
        # Feed ten increasing results to t1 and five flat ones to t2 so
        # both trials have a reward history for the median computation.
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        for i in range(10):
            self.assertEqual(
                rule.on_trial_result(None, t1, result(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                rule.on_trial_result(None, t2, result(i, 450)),
                TrialScheduler.CONTINUE)
        return t1, t2
    def testMedianStoppingConstantPerf(self):
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        # t2 may continue until its running mean falls below the median
        # of the completed trials at the same time step.
        self.assertEqual(
            rule.on_trial_result(None, t2, result(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t2, result(6, 0)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t2, result(10, 450)),
            TrialScheduler.STOP)
    def testMedianStoppingOnCompleteOnly(self):
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        # No trial completed yet, so nothing can be stopped.
        self.assertEqual(
            rule.on_trial_result(None, t2, result(100, 0)),
            TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t1, result(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t2, result(101, 0)),
            TrialScheduler.STOP)
    def testMedianStoppingGracePeriod(self):
        rule = MedianStoppingRule(grace_period=2.5, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        # A poor new trial survives until it exceeds the grace period.
        t3 = Trial("PPO")
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)
    def testMedianStoppingMinSamples(self):
        rule = MedianStoppingRule(grace_period=0, min_samples_required=2)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        # Only one completed trial -- below min_samples_required, so no
        # stopping yet.
        t3 = Trial("PPO")
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)),
            TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t2, result(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)
    def testMedianStoppingUsesMedian(self):
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        # 260 is above neither completed trial's mean (both 450), so the
        # trial is stopped once compared against the median.
        t3 = Trial("PPO")
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.STOP)
    def testMedianStoppingSoftStop(self):
        # hard_stop=False should PAUSE instead of STOP.
        rule = MedianStoppingRule(
            grace_period=0, min_samples_required=1, hard_stop=False)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.PAUSE)
    def testAlternateMetrics(self):
        # Same scenario as basicSetup, but driven by training_iteration /
        # neg_mean_loss instead of the default result attributes.
        def result2(t, rew):
            return dict(training_iteration=t, neg_mean_loss=rew)
        rule = MedianStoppingRule(
            grace_period=0,
            min_samples_required=1,
            time_attr='training_iteration',
            reward_attr='neg_mean_loss')
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        for i in range(10):
            self.assertEqual(
                rule.on_trial_result(None, t1, result2(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                rule.on_trial_result(None, t2, result2(i, 450)),
                TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t1, result2(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t2, result2(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t2, result2(6, 0)),
            TrialScheduler.CONTINUE)
class _MockTrialExecutor(TrialExecutor):
    """Minimal TrialExecutor stand-in: records state transitions on the
    trial object instead of launching real trainables."""

    def start_trial(self, trial, checkpoint_obj=None):
        """Mark the trial RUNNING, remembering any restored checkpoint."""
        trial.logger_running = True
        # Fix: checkpoint_obj defaults to None, but the original code
        # unconditionally dereferenced checkpoint_obj.value and would
        # raise AttributeError when called without a checkpoint.
        if checkpoint_obj is not None:
            trial.restored_checkpoint = checkpoint_obj.value
        trial.status = Trial.RUNNING

    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        """Transition the trial to ERROR or TERMINATED."""
        trial.status = Trial.ERROR if error else Trial.TERMINATED
        if stop_logger:
            trial.logger_running = False

    def restore(self, trial, checkpoint=None):
        # No real state to restore in the mock.
        pass

    def save(self, trial, type=Checkpoint.DISK):
        # Stand-in checkpoint value: just the trainable name.
        return trial.trainable_name

    def reset_trial(self, trial, new_config, new_experiment_tag):
        # The mock never supports in-place resets.
        return False
class _MockTrialRunner():
    """In-memory TrialRunner substitute for driving a scheduler under test."""

    def __init__(self, scheduler):
        self._scheduler_alg = scheduler
        self.trials = []
        self.trial_executor = _MockTrialExecutor()

    def process_action(self, trial, action):
        """Apply a scheduler decision to ``trial``."""
        if action == TrialScheduler.PAUSE:
            self._pause_trial(trial)
        elif action == TrialScheduler.STOP:
            self.trial_executor.stop_trial(trial)
        # TrialScheduler.CONTINUE requires no state change.

    def stop_trial(self, trial):
        """Notify the scheduler that ``trial`` is going away."""
        status = trial.status
        if status in (Trial.ERROR, Trial.TERMINATED):
            return
        if status in (Trial.PENDING, Trial.PAUSED):
            self._scheduler_alg.on_trial_remove(self, trial)
        else:
            self._scheduler_alg.on_trial_complete(self, trial, result(100, 10))

    def add_trial(self, trial):
        """Track ``trial`` and announce it to the scheduler."""
        self.trials.append(trial)
        self._scheduler_alg.on_trial_add(self, trial)

    def get_trials(self):
        return self.trials

    def has_resources(self, resources):
        # The mock always pretends capacity is available.
        return True

    def _pause_trial(self, trial):
        trial.status = Trial.PAUSED

    def _launch_trial(self, trial):
        trial.status = Trial.RUNNING
class HyperbandSuite(unittest.TestCase):
    """Tests for HyperBandScheduler: bracket construction, successive
    halving, errored/early-ended trials, trial removal, and sparse
    (None-padded) bracket handling."""
    def setUp(self):
        ray.init()
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    def schedulerSetup(self, num_trials, max_t=81):
        """Setup a scheduler and Runner with max Iter = 9.
        Bracketing is placed as follows:
        (5, 81);
        (8, 27) -> (3, 54);
        (15, 9) -> (5, 27) -> (2, 45);
        (34, 3) -> (12, 9) -> (4, 27) -> (2, 42);
        (81, 1) -> (27, 3) -> (9, 9) -> (3, 27) -> (1, 41);"""
        sched = HyperBandScheduler(max_t=max_t)
        for i in range(num_trials):
            t = Trial("__fake")
            sched.on_trial_add(None, t)
        runner = _MockTrialRunner(sched)
        return sched, runner
    def default_statistics(self):
        """Default statistics for HyperBand."""
        sched = HyperBandScheduler()
        # Per-bracket initial trial count n and resource budget r.
        res = {
            str(s): {
                "n": sched._get_n0(s),
                "r": sched._get_r0(s)
            }
            for s in range(sched._s_max_1)
        }
        res["max_trials"] = sum(v["n"] for v in res.values())
        res["brack_count"] = sched._s_max_1
        res["s_max"] = sched._s_max_1 - 1
        return res
    def downscale(self, n, sched):
        # Expected surviving trial count after one halving round.
        return int(np.ceil(n / sched._eta))
    def basicSetup(self):
        """Setup and verify full band."""
        stats = self.default_statistics()
        sched, _ = self.schedulerSetup(stats["max_trials"])
        self.assertEqual(len(sched._hyperbands), 1)
        self.assertEqual(sched._cur_band_filled(), True)
        filled_band = sched._hyperbands[0]
        for bracket in filled_band:
            self.assertEqual(bracket.filled(), True)
        return sched
    def advancedSetup(self):
        # Add 4 extra trials so a second, unfilled band is created.
        sched = self.basicSetup()
        for i in range(4):
            t = Trial("__fake")
            sched.on_trial_add(None, t)
        self.assertEqual(sched._cur_band_filled(), False)
        unfilled_band = sched._hyperbands[-1]
        self.assertEqual(len(unfilled_band), 2)
        bracket = unfilled_band[-1]
        self.assertEqual(bracket.filled(), False)
        self.assertEqual(len(bracket.current_trials()), 7)
        return sched
    def testConfigSameEta(self):
        # Bracket shapes for the default max_t=81...
        sched = HyperBandScheduler()
        i = 0
        while not sched._cur_band_filled():
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 5)
        self.assertEqual(sched._hyperbands[0][0]._n, 5)
        self.assertEqual(sched._hyperbands[0][0]._r, 81)
        self.assertEqual(sched._hyperbands[0][-1]._n, 81)
        self.assertEqual(sched._hyperbands[0][-1]._r, 1)
        # ...scale the budgets by 10x for max_t=810 with the same eta.
        sched = HyperBandScheduler(max_t=810)
        i = 0
        while not sched._cur_band_filled():
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 5)
        self.assertEqual(sched._hyperbands[0][0]._n, 5)
        self.assertEqual(sched._hyperbands[0][0]._r, 810)
        self.assertEqual(sched._hyperbands[0][-1]._n, 81)
        self.assertEqual(sched._hyperbands[0][-1]._r, 10)
    def testConfigSameEtaSmall(self):
        # With max_t=1 only the first bracket slot is usable; the rest
        # of the band is padded with None.
        sched = HyperBandScheduler(max_t=1)
        i = 0
        while len(sched._hyperbands) < 2:
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 5)
        self.assertTrue(all(v is None for v in sched._hyperbands[0][1:]))
    def testSuccessiveHalving(self):
        """Setup full band, then iterate through last bracket (n=81)
        to make sure successive halving is correct."""
        stats = self.default_statistics()
        sched, mock_runner = self.schedulerSetup(stats["max_trials"])
        big_bracket = sched._state["bracket"]
        cur_units = stats[str(stats["s_max"])]["r"]
        # The last bracket will downscale 4 times
        for x in range(stats["brack_count"] - 1):
            trials = big_bracket.current_trials()
            current_length = len(trials)
            for trl in trials:
                mock_runner._launch_trial(trl)
            # Provides results from 0 to 8 in order, keeping last one running
            for i, trl in enumerate(trials):
                action = sched.on_trial_result(mock_runner, trl,
                                               result(cur_units, i))
                if i < current_length - 1:
                    self.assertEqual(action, TrialScheduler.PAUSE)
                mock_runner.process_action(trl, action)
            self.assertEqual(action, TrialScheduler.CONTINUE)
            new_length = len(big_bracket.current_trials())
            self.assertEqual(new_length, self.downscale(current_length, sched))
            cur_units += int(cur_units * sched._eta)
        self.assertEqual(len(big_bracket.current_trials()), 1)
    def testHalvingStop(self):
        stats = self.default_statistics()
        num_trials = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(num_trials)
        big_bracket = sched._state["bracket"]
        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)
        # Provides results in reverse order; the last-reported (worst)
        # trial is stopped by the halving round.
        cur_units = stats[str(1)]["r"]
        for i, trl in reversed(list(enumerate(big_bracket.current_trials()))):
            action = sched.on_trial_result(mock_runner, trl,
                                           result(cur_units, i))
            mock_runner.process_action(trl, action)
        self.assertEqual(action, TrialScheduler.STOP)
    def testStopsLastOne(self):
        stats = self.default_statistics()
        num_trials = stats[str(0)]["n"]  # setup one bracket
        sched, mock_runner = self.schedulerSetup(num_trials)
        big_bracket = sched._state["bracket"]
        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)
        # Provides results in order; with only one bracket the final
        # remaining trial is stopped when the bracket completes.
        cur_units = stats[str(0)]["r"]
        for i, trl in enumerate(big_bracket.current_trials()):
            action = sched.on_trial_result(mock_runner, trl,
                                           result(cur_units, i))
            mock_runner.process_action(trl, action)
        self.assertEqual(action, TrialScheduler.STOP)
    def testTrialErrored(self):
        """If a trial errored, make sure successive halving still happens"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 3
        sched, mock_runner = self.schedulerSetup(trial_count)
        t1, t2, t3 = sched._state["bracket"].current_trials()
        for t in [t1, t2, t3]:
            mock_runner._launch_trial(t)
        sched.on_trial_error(mock_runner, t3)
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t1,
                                  result(stats[str(1)]["r"], 10)))
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t2,
                                  result(stats[str(1)]["r"], 10)))
    def testTrialErrored2(self):
        """Check successive halving happened even when last trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(trial_count)
        trials = sched._state["bracket"].current_trials()
        for t in trials[:-1]:
            mock_runner._launch_trial(t)
            sched.on_trial_result(mock_runner, t, result(
                stats[str(1)]["r"], 10))
        mock_runner._launch_trial(trials[-1])
        sched.on_trial_error(mock_runner, trials[-1])
        self.assertEqual(
            len(sched._state["bracket"].current_trials()),
            self.downscale(stats[str(1)]["n"], sched))
    def testTrialEndedEarly(self):
        """Check successive halving happened even when one trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 3
        sched, mock_runner = self.schedulerSetup(trial_count)
        t1, t2, t3 = sched._state["bracket"].current_trials()
        for t in [t1, t2, t3]:
            mock_runner._launch_trial(t)
        sched.on_trial_complete(mock_runner, t3, result(1, 12))
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t1,
                                  result(stats[str(1)]["r"], 10)))
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t2,
                                  result(stats[str(1)]["r"], 10)))
    def testTrialEndedEarly2(self):
        """Check successive halving happened even when last trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(trial_count)
        trials = sched._state["bracket"].current_trials()
        for t in trials[:-1]:
            mock_runner._launch_trial(t)
            sched.on_trial_result(mock_runner, t, result(
                stats[str(1)]["r"], 10))
        mock_runner._launch_trial(trials[-1])
        sched.on_trial_complete(mock_runner, trials[-1], result(100, 12))
        self.assertEqual(
            len(sched._state["bracket"].current_trials()),
            self.downscale(stats[str(1)]["n"], sched))
    def testAddAfterHalving(self):
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 1
        sched, mock_runner = self.schedulerSetup(trial_count)
        bracket_trials = sched._state["bracket"].current_trials()
        init_units = stats[str(1)]["r"]
        for t in bracket_trials:
            mock_runner._launch_trial(t)
        for i, t in enumerate(bracket_trials):
            action = sched.on_trial_result(mock_runner, t, result(
                init_units, i))
            self.assertEqual(action, TrialScheduler.CONTINUE)
        t = Trial("__fake")
        sched.on_trial_add(None, t)
        mock_runner._launch_trial(t)
        self.assertEqual(len(sched._state["bracket"].current_trials()), 2)
        # Make sure that newly added trial gets fair computation (not just 1)
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t, result(init_units, 12)))
        new_units = init_units + int(init_units * sched._eta)
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t, result(new_units, 12)))
    def testAlternateMetrics(self):
        """Checking that alternate metrics will pass."""
        def result2(t, rew):
            return dict(time_total_s=t, neg_mean_loss=rew)
        sched = HyperBandScheduler(
            time_attr='time_total_s', reward_attr='neg_mean_loss')
        stats = self.default_statistics()
        for i in range(stats["max_trials"]):
            t = Trial("__fake")
            sched.on_trial_add(None, t)
        runner = _MockTrialRunner(sched)
        big_bracket = sched._hyperbands[0][-1]
        for trl in big_bracket.current_trials():
            runner._launch_trial(trl)
        current_length = len(big_bracket.current_trials())
        # Provides results from 0 to 8 in order, keeping the last one running
        for i, trl in enumerate(big_bracket.current_trials()):
            action = sched.on_trial_result(runner, trl, result2(1, i))
            runner.process_action(trl, action)
        new_length = len(big_bracket.current_trials())
        self.assertEqual(action, TrialScheduler.CONTINUE)
        self.assertEqual(new_length, self.downscale(current_length, sched))
    def testJumpingTime(self):
        # One trial reports a much later timestamp than the rest; it
        # should be paused rather than let it dominate the bracket.
        sched, mock_runner = self.schedulerSetup(81)
        big_bracket = sched._hyperbands[0][-1]
        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)
        # Provides results from 0 to 8 in order, keeping the last one running
        main_trials = big_bracket.current_trials()[:-1]
        jump = big_bracket.current_trials()[-1]
        for i, trl in enumerate(main_trials):
            action = sched.on_trial_result(mock_runner, trl, result(1, i))
            mock_runner.process_action(trl, action)
        action = sched.on_trial_result(mock_runner, jump, result(4, i))
        self.assertEqual(action, TrialScheduler.PAUSE)
        current_length = len(big_bracket.current_trials())
        self.assertLess(current_length, 27)
    def testRemove(self):
        """Test with 4: start 1, remove 1 pending, add 2, remove 1 pending."""
        sched, runner = self.schedulerSetup(4)
        trials = sorted(list(sched._trial_info), key=lambda t: t.trial_id)
        runner._launch_trial(trials[0])
        sched.on_trial_result(runner, trials[0], result(1, 5))
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        bracket, _ = sched._trial_info[trials[1]]
        self.assertTrue(trials[1] in bracket._live_trials)
        sched.on_trial_remove(runner, trials[1])
        self.assertFalse(trials[1] in bracket._live_trials)
        for i in range(2):
            trial = Trial("__fake")
            sched.on_trial_add(None, trial)
        bracket, _ = sched._trial_info[trial]
        self.assertTrue(trial in bracket._live_trials)
        sched.on_trial_remove(runner, trial)  # where trial is not running
        self.assertFalse(trial in bracket._live_trials)
    def testFilterNoneBracket(self):
        sched, runner = self.schedulerSetup(100, 20)
        # `sched` should contain None (padding) brackets for this config.
        non_brackets = [
            b for hyperband in sched._hyperbands for b in hyperband
            if b is None
        ]
        self.assertTrue(non_brackets)
        # Make sure `choose_trial_to_run` still works despite the padding.
        trial = sched.choose_trial_to_run(runner)
        self.assertIsNotNone(trial)
class _MockTrial(Trial):
    """Trial stand-in populating only the fields the schedulers read."""

    def __init__(self, i, config):
        # Intentionally does NOT call Trial.__init__; no Ray state is
        # needed for these scheduler unit tests.
        self.config = config
        self.trainable_name = "trial_" + str(i)
        self.experiment_tag = "tag"
        self.restored_checkpoint = None
        self.logger_running = False
        self.resources = Resources(1, 0)
class PopulationBasedTestingSuite(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def basicSetup(self, resample_prob=0.0, explore=None):
pbt = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=10,
resample_probability=resample_prob,
hyperparam_mutations={
"id_factor": [100],
"float_factor": lambda: 100.0,
"int_factor": lambda: 10,
},
custom_explore_fn=explore)
runner = _MockTrialRunner(pbt)
for i in range(5):
trial = _MockTrial(
i, {
"id_factor": i,
"float_factor": 2.0,
"const_factor": 3,
"int_factor": 10
})
runner.add_trial(trial)
trial.status = Trial.RUNNING
self.assertEqual(
pbt.on_trial_result(runner, trial, result(10, 50 * i)),
TrialScheduler.CONTINUE)
pbt.reset_stats()
return pbt, runner
def testCheckpointsMostPromisingTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
# no checkpoint: haven't hit next perturbation interval yet
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(15, 200)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 0)
# checkpoint: both past interval and upper quantile
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, 200)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [200, 50, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 1)
self.assertEqual(
pbt.on_trial_result(runner, trials[1], result(30, 201)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [200, 201, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 2)
# not upper quantile any more
self.assertEqual(
pbt.on_trial_result(runner, trials[4], result(30, 199)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt._num_checkpoints, 2)
self.assertEqual(pbt._num_perturbations, 0)
def testPerturbsLowPerformingTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
# no perturbation: haven't hit next perturbation interval
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(15, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertTrue("@perturbed" not in trials[0].experiment_tag)
self.assertEqual(pbt._num_perturbations, 0)
# perturb since it's lower quantile
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [-100, 50, 100, 150, 200])
self.assertTrue("@perturbed" in trials[0].experiment_tag)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertEqual(pbt._num_perturbations, 1)
# also perturbed
self.assertEqual(
pbt.on_trial_result(runner, trials[2], result(20, 40)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [-100, 50, 40, 150, 200])
self.assertEqual(pbt._num_perturbations, 2)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertTrue("@perturbed" in trials[2].experiment_tag)
def testPerturbWithoutResample(self):
pbt, runner = self.basicSetup(resample_prob=0.0)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertIn(trials[0].config["id_factor"], [100])
self.assertIn(trials[0].config["float_factor"], [2.4, 1.6])
self.assertEqual(type(trials[0].config["float_factor"]), float)
self.assertIn(trials[0].config["int_factor"], [8, 12])
self.assertEqual(type(trials[0].config["int_factor"]), int)
self.assertEqual(trials[0].config["const_factor"], 3)
def testPerturbWithResample(self):
pbt, runner = self.basicSetup(resample_prob=1.0)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertEqual(trials[0].config["id_factor"], 100)
self.assertEqual(trials[0].config["float_factor"], 100.0)
self.assertEqual(type(trials[0].config["float_factor"]), float)
self.assertEqual(trials[0].config["int_factor"], 10)
self.assertEqual(type(trials[0].config["int_factor"]), int)
self.assertEqual(trials[0].config["const_factor"], 3)
def testPerturbationValues(self):
def assertProduces(fn, values):
random.seed(0)
seen = set()
for _ in range(100):
seen.add(fn()["v"])
self.assertEqual(seen, values)
# Categorical case
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 8})
assertProduces(
lambda: explore({"v": 3}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 4})
assertProduces(
lambda: explore({"v": 10}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{8, 10})
assertProduces(
lambda: explore({"v": 7}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 4, 8, 10})
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 1.0, lambda x: x),
{3, 4, 8, 10})
# Continuous case
assertProduces(
lambda: explore(
{"v": 100}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
{80, 120})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
{80.0, 120.0})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 1.0,
lambda x: x),
{10.0, 100.0})
def testYieldsTimeToOtherTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
trials[0].status = Trial.PENDING # simulate not enough resources
self.assertEqual(
pbt.on_trial_result(runner, trials[1], result(20, 1000)),
TrialScheduler.PAUSE)
self.assertEqual(pbt.last_scores(trials), [0, 1000, 100, 150, 200])
self.assertEqual(pbt.choose_trial_to_run(runner), trials[0])
def testSchedulesMostBehindTrialToRun(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
pbt.on_trial_result(runner, trials[0], result(800, 1000))
pbt.on_trial_result(runner, trials[1], result(700, 1001))
pbt.on_trial_result(runner, trials[2], result(600, 1002))
pbt.on_trial_result(runner, trials[3], result(500, 1003))
pbt.on_trial_result(runner, trials[4], result(700, 1004))
self.assertEqual(pbt.choose_trial_to_run(runner), None)
for i in range(5):
trials[i].status = Trial.PENDING
self.assertEqual(pbt.choose_trial_to_run(runner), trials[3])
def testPerturbationResetsLastPerturbTime(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
pbt.on_trial_result(runner, trials[0], result(10000, 1005))
pbt.on_trial_result(runner, trials[1], result(10000, 1004))
pbt.on_trial_result(runner, trials[2], result(600, 1003))
self.assertEqual(pbt._num_perturbations, 0)
pbt.on_trial_result(runner, trials[3], result(500, 1002))
self.assertEqual(pbt._num_perturbations, 1)
pbt.on_trial_result(runner, trials[3], result(600, 100))
self.assertEqual(pbt._num_perturbations, 1)
pbt.on_trial_result(runner, trials[3], result(11000, 100))
self.assertEqual(pbt._num_perturbations, 2)
def testPostprocessingHook(self):
def explore(new_config):
new_config["id_factor"] = 42
new_config["float_factor"] = 43
return new_config
pbt, runner = self.basicSetup(resample_prob=0.0, explore=explore)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(trials[0].config["id_factor"], 42)
self.assertEqual(trials[0].config["float_factor"], 43)
class AsyncHyperBandSuite(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def basicSetup(self, scheduler):
t1 = Trial("PPO") # mean is 450, max 900, t_max=10
t2 = Trial("PPO") # mean is 450, max 450, t_max=5
scheduler.on_trial_add(None, t1)
scheduler.on_trial_add(None, t2)
for i in range(10):
self.assertEqual(
scheduler.on_trial_result(None, t1, result(i, i * 100)),
TrialScheduler.CONTINUE)
for i in range(5):
self.assertEqual(
scheduler.on_trial_result(None, t2, result(i, 450)),
TrialScheduler.CONTINUE)
return t1, t2
def testAsyncHBOnComplete(self):
scheduler = AsyncHyperBandScheduler(max_t=10, brackets=1)
t1, t2 = self.basicSetup(scheduler)
t3 = Trial("PPO")
scheduler.on_trial_add(None, t3)
scheduler.on_trial_complete(None, t3, result(10, 1000))
self.assertEqual(
scheduler.on_trial_result(None, t2, result(101, 0)),
TrialScheduler.STOP)
def testAsyncHBGracePeriod(self):
scheduler = AsyncHyperBandScheduler(
grace_period=2.5, reduction_factor=3, brackets=1)
t1, t2 = self.basicSetup(scheduler)
scheduler.on_trial_complete(None, t1, result(10, 1000))
scheduler.on_trial_complete(None, t2, result(10, 1000))
t3 = Trial("PPO")
scheduler.on_trial_add(None, t3)
self.assertEqual(
scheduler.on_trial_result(None, t3, result(1, 10)),
TrialScheduler.CONTINUE)
self.assertEqual(
scheduler.on_trial_result(None, t3, result(2, 10)),
TrialScheduler.CONTINUE)
self.assertEqual(
scheduler.on_trial_result(None, t3, result(3, 10)),
TrialScheduler.STOP)
def testAsyncHBAllCompletes(self):
scheduler = AsyncHyperBandScheduler(max_t=10, brackets=10)
trials = [Trial("PPO") for i in range(10)]
for t in trials:
scheduler.on_trial_add(None, t)
for t in trials:
self.assertEqual(
scheduler.on_trial_result(None, t, result(10, -2)),
TrialScheduler.STOP)
def testAsyncHBUsesPercentile(self):
scheduler = AsyncHyperBandScheduler(
grace_period=1, max_t=10, reduction_factor=2, brackets=1)
t1, t2 = self.basicSetup(scheduler)
scheduler.on_trial_complete(None, t1, result(10, 1000))
scheduler.on_trial_complete(None, t2, result(10, 1000))
t3 = Trial("PPO")
scheduler.on_trial_add(None, t3)
self.assertEqual(
scheduler.on_trial_result(None, t3, result(1, 260)),
TrialScheduler.STOP)
self.assertEqual(
scheduler.on_trial_result(None, t3, result(2, 260)),
TrialScheduler.STOP)
def testAlternateMetrics(self):
def result2(t, rew):
return dict(training_iteration=t, neg_mean_loss=rew)
scheduler = AsyncHyperBandScheduler(
grace_period=1,
time_attr='training_iteration',
reward_attr='neg_mean_loss',
brackets=1)
t1 = Trial("PPO") # mean is 450, max 900, t_max=10
t2 = Trial("PPO") # mean is 450, max 450, t_max=5
scheduler.on_trial_add(None, t1)
scheduler.on_trial_add(None, t2)
for i in range(10):
self.assertEqual(
scheduler.on_trial_result(None, t1, result2(i, i * 100)),
TrialScheduler.CONTINUE)
for i in range(5):
self.assertEqual(
scheduler.on_trial_result(None, t2, result2(i, 450)),
TrialScheduler.CONTINUE)
scheduler.on_trial_complete(None, t1, result2(10, 1000))
self.assertEqual(
scheduler.on_trial_result(None, t2, result2(5, 450)),
TrialScheduler.CONTINUE)
self.assertEqual(
scheduler.on_trial_result(None, t2, result2(6, 0)),
TrialScheduler.CONTINUE)
# Run the full scheduler test suite with per-test output when executed
# directly as a script.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| 39.184413 | 79 | 0.60837 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import unittest
import numpy as np
import ray
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
PopulationBasedTraining, MedianStoppingRule,
TrialScheduler)
from ray.tune.schedulers.pbt import explore
from ray.tune.trial import Trial, Resources, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.rllib import _register_all
_register_all()
def result(t, rew):
    """Build a minimal Tune result dict for elapsed time *t* and reward *rew*.

    ``training_iteration`` mirrors ``t`` truncated to an int so the same
    dict satisfies both time-based and iteration-based schedulers.
    """
    return {
        "time_total_s": t,
        "episode_reward_mean": rew,
        "training_iteration": int(t),
    }
class EarlyStoppingSuite(unittest.TestCase):
    """Tests for MedianStoppingRule's CONTINUE/STOP/PAUSE decisions."""

    def setUp(self):
        ray.init()

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def basicSetup(self, rule):
        """Feed two trials into *rule* and return them.

        t1 reports rewards 0, 100, ..., 900 over 10 steps; t2 reports a
        constant 450 over 5 steps.  Every intermediate report is expected
        to yield CONTINUE (no trial has completed yet).
        """
        t1 = Trial("PPO")
        t2 = Trial("PPO")
        for i in range(10):
            self.assertEqual(
                rule.on_trial_result(None, t1, result(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                rule.on_trial_result(None, t2, result(i, 450)),
                TrialScheduler.CONTINUE)
        return t1, t2

    def testMedianStoppingConstantPerf(self):
        """A constant-reward trial is stopped once it trails the median."""
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t2, result(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t2, result(6, 0)),
            TrialScheduler.CONTINUE)
        # At t=10 the completed trial's trajectory dominates: stop t2.
        self.assertEqual(
            rule.on_trial_result(None, t2, result(10, 450)),
            TrialScheduler.STOP)

    def testMedianStoppingOnCompleteOnly(self):
        """Only *completed* trials contribute to the median baseline."""
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        # No trial has completed yet, so even a 0-reward report continues.
        self.assertEqual(
            rule.on_trial_result(None, t2, result(100, 0)),
            TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t1, result(10, 1000))
        # Once t1 completes, the same poor report triggers a stop.
        self.assertEqual(
            rule.on_trial_result(None, t2, result(101, 0)),
            TrialScheduler.STOP)

    def testMedianStoppingGracePeriod(self):
        """No trial may be stopped before grace_period time has elapsed."""
        rule = MedianStoppingRule(grace_period=2.5, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        # t=1 and t=2 fall inside the 2.5s grace period.
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 10)),
            TrialScheduler.CONTINUE)
        # t=3 exceeds the grace period; the poor performer is stopped.
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)

    def testMedianStoppingMinSamples(self):
        """min_samples_required completed trials are needed before stopping."""
        rule = MedianStoppingRule(grace_period=0, min_samples_required=2)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        t3 = Trial("PPO")
        # Only one completion so far: not enough samples to stop anyone.
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)),
            TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t2, result(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t3, result(3, 10)), TrialScheduler.STOP)

    def testMedianStoppingUsesMedian(self):
        """The cutoff is the median of completed trials' running means."""
        rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        # 260 sits just below the median trajectory; first report survives,
        # the second is cut.
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.STOP)

    def testMedianStoppingSoftStop(self):
        """With hard_stop=False an underperformer is PAUSEd, not STOPped."""
        rule = MedianStoppingRule(
            grace_period=0, min_samples_required=1, hard_stop=False)
        t1, t2 = self.basicSetup(rule)
        rule.on_trial_complete(None, t1, result(10, 1000))
        rule.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        self.assertEqual(
            rule.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.PAUSE)

    def testAlternateMetrics(self):
        """The rule honors custom time_attr/reward_attr result keys."""
        def result2(t, rew):
            return dict(training_iteration=t, neg_mean_loss=rew)

        rule = MedianStoppingRule(
            grace_period=0,
            min_samples_required=1,
            time_attr='training_iteration',
            reward_attr='neg_mean_loss')
        t1 = Trial("PPO")
        t2 = Trial("PPO")
        for i in range(10):
            self.assertEqual(
                rule.on_trial_result(None, t1, result2(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                rule.on_trial_result(None, t2, result2(i, 450)),
                TrialScheduler.CONTINUE)
        rule.on_trial_complete(None, t1, result2(10, 1000))
        self.assertEqual(
            rule.on_trial_result(None, t2, result2(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            rule.on_trial_result(None, t2, result2(6, 0)),
            TrialScheduler.CONTINUE)
class _MockTrialExecutor(TrialExecutor):
    """Executor stub that records state transitions directly on the trial."""

    def start_trial(self, trial, checkpoint_obj=None):
        """Mark *trial* RUNNING and record which checkpoint it restored."""
        trial.status = Trial.RUNNING
        # Dereferenced unconditionally: callers in these tests always pass
        # a checkpoint object.
        trial.restored_checkpoint = checkpoint_obj.value
        trial.logger_running = True

    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        """Flip *trial* into a terminal status, optionally closing its logger."""
        if error:
            trial.status = Trial.ERROR
        else:
            trial.status = Trial.TERMINATED
        if stop_logger:
            trial.logger_running = False

    def restore(self, trial, checkpoint=None):
        """No-op: the mock keeps no real checkpoint state."""
        pass

    def save(self, trial, type=Checkpoint.DISK):
        """Return the trial's name as a stand-in checkpoint handle."""
        return trial.trainable_name

    def reset_trial(self, trial, new_config, new_experiment_tag):
        """In-place reuse is unsupported by the mock."""
        return False
class _MockTrialRunner():
    """Minimal TrialRunner stand-in: just enough surface for the schedulers."""

    def __init__(self, scheduler):
        self._scheduler_alg = scheduler
        self.trials = []
        self.trial_executor = _MockTrialExecutor()

    def process_action(self, trial, action):
        """Apply a scheduler decision to *trial*; CONTINUE is a no-op."""
        if action == TrialScheduler.PAUSE:
            self._pause_trial(trial)
        elif action == TrialScheduler.STOP:
            self.trial_executor.stop_trial(trial)

    def stop_trial(self, trial):
        """Tell the scheduler that *trial* is going away."""
        if trial.status in (Trial.ERROR, Trial.TERMINATED):
            return  # already finished, nothing to report
        if trial.status in (Trial.PENDING, Trial.PAUSED):
            self._scheduler_alg.on_trial_remove(self, trial)
        else:
            self._scheduler_alg.on_trial_complete(self, trial, result(100, 10))

    def add_trial(self, trial):
        """Register *trial* with both the runner and the scheduler."""
        self.trials.append(trial)
        self._scheduler_alg.on_trial_add(self, trial)

    def get_trials(self):
        return self.trials

    def has_resources(self, resources):
        # The mock cluster always has capacity.
        return True

    def _pause_trial(self, trial):
        trial.status = Trial.PAUSED

    def _launch_trial(self, trial):
        trial.status = Trial.RUNNING
class HyperbandSuite(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all()
def schedulerSetup(self, num_trials, max_t=81):
sched = HyperBandScheduler(max_t=max_t)
for i in range(num_trials):
t = Trial("__fake")
sched.on_trial_add(None, t)
runner = _MockTrialRunner(sched)
return sched, runner
def default_statistics(self):
sched = HyperBandScheduler()
res = {
str(s): {
"n": sched._get_n0(s),
"r": sched._get_r0(s)
}
for s in range(sched._s_max_1)
}
res["max_trials"] = sum(v["n"] for v in res.values())
res["brack_count"] = sched._s_max_1
res["s_max"] = sched._s_max_1 - 1
return res
def downscale(self, n, sched):
return int(np.ceil(n / sched._eta))
def basicSetup(self):
stats = self.default_statistics()
sched, _ = self.schedulerSetup(stats["max_trials"])
self.assertEqual(len(sched._hyperbands), 1)
self.assertEqual(sched._cur_band_filled(), True)
filled_band = sched._hyperbands[0]
for bracket in filled_band:
self.assertEqual(bracket.filled(), True)
return sched
def advancedSetup(self):
sched = self.basicSetup()
for i in range(4):
t = Trial("__fake")
sched.on_trial_add(None, t)
self.assertEqual(sched._cur_band_filled(), False)
unfilled_band = sched._hyperbands[-1]
self.assertEqual(len(unfilled_band), 2)
bracket = unfilled_band[-1]
self.assertEqual(bracket.filled(), False)
self.assertEqual(len(bracket.current_trials()), 7)
return sched
def testConfigSameEta(self):
sched = HyperBandScheduler()
i = 0
while not sched._cur_band_filled():
t = Trial("__fake")
sched.on_trial_add(None, t)
i += 1
self.assertEqual(len(sched._hyperbands[0]), 5)
self.assertEqual(sched._hyperbands[0][0]._n, 5)
self.assertEqual(sched._hyperbands[0][0]._r, 81)
self.assertEqual(sched._hyperbands[0][-1]._n, 81)
self.assertEqual(sched._hyperbands[0][-1]._r, 1)
sched = HyperBandScheduler(max_t=810)
i = 0
while not sched._cur_band_filled():
t = Trial("__fake")
sched.on_trial_add(None, t)
i += 1
self.assertEqual(len(sched._hyperbands[0]), 5)
self.assertEqual(sched._hyperbands[0][0]._n, 5)
self.assertEqual(sched._hyperbands[0][0]._r, 810)
self.assertEqual(sched._hyperbands[0][-1]._n, 81)
self.assertEqual(sched._hyperbands[0][-1]._r, 10)
def testConfigSameEtaSmall(self):
sched = HyperBandScheduler(max_t=1)
i = 0
while len(sched._hyperbands) < 2:
t = Trial("__fake")
sched.on_trial_add(None, t)
i += 1
self.assertEqual(len(sched._hyperbands[0]), 5)
self.assertTrue(all(v is None for v in sched._hyperbands[0][1:]))
def testSuccessiveHalving(self):
stats = self.default_statistics()
sched, mock_runner = self.schedulerSetup(stats["max_trials"])
big_bracket = sched._state["bracket"]
cur_units = stats[str(stats["s_max"])]["r"]
for x in range(stats["brack_count"] - 1):
trials = big_bracket.current_trials()
current_length = len(trials)
for trl in trials:
mock_runner._launch_trial(trl)
for i, trl in enumerate(trials):
action = sched.on_trial_result(mock_runner, trl,
result(cur_units, i))
if i < current_length - 1:
self.assertEqual(action, TrialScheduler.PAUSE)
mock_runner.process_action(trl, action)
self.assertEqual(action, TrialScheduler.CONTINUE)
new_length = len(big_bracket.current_trials())
self.assertEqual(new_length, self.downscale(current_length, sched))
cur_units += int(cur_units * sched._eta)
self.assertEqual(len(big_bracket.current_trials()), 1)
def testHalvingStop(self):
stats = self.default_statistics()
num_trials = stats[str(0)]["n"] + stats[str(1)]["n"]
sched, mock_runner = self.schedulerSetup(num_trials)
big_bracket = sched._state["bracket"]
for trl in big_bracket.current_trials():
mock_runner._launch_trial(trl)
rl in reversed(list(enumerate(big_bracket.current_trials()))):
action = sched.on_trial_result(mock_runner, trl,
result(cur_units, i))
mock_runner.process_action(trl, action)
self.assertEqual(action, TrialScheduler.STOP)
def testStopsLastOne(self):
stats = self.default_statistics()
num_trials = stats[str(0)]["n"]
sched, mock_runner = self.schedulerSetup(num_trials)
big_bracket = sched._state["bracket"]
for trl in big_bracket.current_trials():
mock_runner._launch_trial(trl)
rl in enumerate(big_bracket.current_trials()):
action = sched.on_trial_result(mock_runner, trl,
result(cur_units, i))
mock_runner.process_action(trl, action)
self.assertEqual(action, TrialScheduler.STOP)
def testTrialErrored(self):
stats = self.default_statistics()
trial_count = stats[str(0)]["n"] + 3
sched, mock_runner = self.schedulerSetup(trial_count)
t1, t2, t3 = sched._state["bracket"].current_trials()
for t in [t1, t2, t3]:
mock_runner._launch_trial(t)
sched.on_trial_error(mock_runner, t3)
self.assertEqual(
TrialScheduler.PAUSE,
sched.on_trial_result(mock_runner, t1,
result(stats[str(1)]["r"], 10)))
self.assertEqual(
TrialScheduler.CONTINUE,
sched.on_trial_result(mock_runner, t2,
result(stats[str(1)]["r"], 10)))
def testTrialErrored2(self):
stats = self.default_statistics()
trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
sched, mock_runner = self.schedulerSetup(trial_count)
trials = sched._state["bracket"].current_trials()
for t in trials[:-1]:
mock_runner._launch_trial(t)
sched.on_trial_result(mock_runner, t, result(
stats[str(1)]["r"], 10))
mock_runner._launch_trial(trials[-1])
sched.on_trial_error(mock_runner, trials[-1])
self.assertEqual(
len(sched._state["bracket"].current_trials()),
self.downscale(stats[str(1)]["n"], sched))
def testTrialEndedEarly(self):
stats = self.default_statistics()
trial_count = stats[str(0)]["n"] + 3
sched, mock_runner = self.schedulerSetup(trial_count)
t1, t2, t3 = sched._state["bracket"].current_trials()
for t in [t1, t2, t3]:
mock_runner._launch_trial(t)
sched.on_trial_complete(mock_runner, t3, result(1, 12))
self.assertEqual(
TrialScheduler.PAUSE,
sched.on_trial_result(mock_runner, t1,
result(stats[str(1)]["r"], 10)))
self.assertEqual(
TrialScheduler.CONTINUE,
sched.on_trial_result(mock_runner, t2,
result(stats[str(1)]["r"], 10)))
def testTrialEndedEarly2(self):
stats = self.default_statistics()
trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
sched, mock_runner = self.schedulerSetup(trial_count)
trials = sched._state["bracket"].current_trials()
for t in trials[:-1]:
mock_runner._launch_trial(t)
sched.on_trial_result(mock_runner, t, result(
stats[str(1)]["r"], 10))
mock_runner._launch_trial(trials[-1])
sched.on_trial_complete(mock_runner, trials[-1], result(100, 12))
self.assertEqual(
len(sched._state["bracket"].current_trials()),
self.downscale(stats[str(1)]["n"], sched))
def testAddAfterHalving(self):
stats = self.default_statistics()
trial_count = stats[str(0)]["n"] + 1
sched, mock_runner = self.schedulerSetup(trial_count)
bracket_trials = sched._state["bracket"].current_trials()
init_units = stats[str(1)]["r"]
for t in bracket_trials:
mock_runner._launch_trial(t)
for i, t in enumerate(bracket_trials):
action = sched.on_trial_result(mock_runner, t, result(
init_units, i))
self.assertEqual(action, TrialScheduler.CONTINUE)
t = Trial("__fake")
sched.on_trial_add(None, t)
mock_runner._launch_trial(t)
self.assertEqual(len(sched._state["bracket"].current_trials()), 2)
self.assertEqual(
TrialScheduler.CONTINUE,
sched.on_trial_result(mock_runner, t, result(init_units, 12)))
new_units = init_units + int(init_units * sched._eta)
self.assertEqual(
TrialScheduler.PAUSE,
sched.on_trial_result(mock_runner, t, result(new_units, 12)))
def testAlternateMetrics(self):
def result2(t, rew):
return dict(time_total_s=t, neg_mean_loss=rew)
sched = HyperBandScheduler(
time_attr='time_total_s', reward_attr='neg_mean_loss')
stats = self.default_statistics()
for i in range(stats["max_trials"]):
t = Trial("__fake")
sched.on_trial_add(None, t)
runner = _MockTrialRunner(sched)
big_bracket = sched._hyperbands[0][-1]
for trl in big_bracket.current_trials():
runner._launch_trial(trl)
current_length = len(big_bracket.current_trials())
for i, trl in enumerate(big_bracket.current_trials()):
action = sched.on_trial_result(runner, trl, result2(1, i))
runner.process_action(trl, action)
new_length = len(big_bracket.current_trials())
self.assertEqual(action, TrialScheduler.CONTINUE)
self.assertEqual(new_length, self.downscale(current_length, sched))
def testJumpingTime(self):
sched, mock_runner = self.schedulerSetup(81)
big_bracket = sched._hyperbands[0][-1]
for trl in big_bracket.current_trials():
mock_runner._launch_trial(trl)
main_trials = big_bracket.current_trials()[:-1]
jump = big_bracket.current_trials()[-1]
for i, trl in enumerate(main_trials):
action = sched.on_trial_result(mock_runner, trl, result(1, i))
mock_runner.process_action(trl, action)
action = sched.on_trial_result(mock_runner, jump, result(4, i))
self.assertEqual(action, TrialScheduler.PAUSE)
current_length = len(big_bracket.current_trials())
self.assertLess(current_length, 27)
def testRemove(self):
sched, runner = self.schedulerSetup(4)
trials = sorted(list(sched._trial_info), key=lambda t: t.trial_id)
runner._launch_trial(trials[0])
sched.on_trial_result(runner, trials[0], result(1, 5))
self.assertEqual(trials[0].status, Trial.RUNNING)
self.assertEqual(trials[1].status, Trial.PENDING)
bracket, _ = sched._trial_info[trials[1]]
self.assertTrue(trials[1] in bracket._live_trials)
sched.on_trial_remove(runner, trials[1])
self.assertFalse(trials[1] in bracket._live_trials)
for i in range(2):
trial = Trial("__fake")
sched.on_trial_add(None, trial)
bracket, _ = sched._trial_info[trial]
self.assertTrue(trial in bracket._live_trials)
sched.on_trial_remove(runner, trial)
self.assertFalse(trial in bracket._live_trials)
def testFilterNoneBracket(self):
sched, runner = self.schedulerSetup(100, 20)
non_brackets = [
b for hyperband in sched._hyperbands for b in hyperband
if b is None
]
self.assertTrue(non_brackets)
# Make sure `choose_trial_to_run' still works
trial = sched.choose_trial_to_run(runner)
self.assertIsNotNone(trial)
class _MockTrial(Trial):
    """Trial stand-in for the PBT tests.

    Does not call ``Trial.__init__``; it only sets the attributes the
    scheduler and mock runner actually read.
    """

    def __init__(self, i, config):
        fields = {
            "trainable_name": "trial_{}".format(i),
            "config": config,
            "experiment_tag": "tag",
            "logger_running": False,
            "restored_checkpoint": None,
            "resources": Resources(1, 0),
        }
        for name, value in fields.items():
            setattr(self, name, value)
class PopulationBasedTestingSuite(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all()
def basicSetup(self, resample_prob=0.0, explore=None):
pbt = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=10,
resample_probability=resample_prob,
hyperparam_mutations={
"id_factor": [100],
"float_factor": lambda: 100.0,
"int_factor": lambda: 10,
},
custom_explore_fn=explore)
runner = _MockTrialRunner(pbt)
for i in range(5):
trial = _MockTrial(
i, {
"id_factor": i,
"float_factor": 2.0,
"const_factor": 3,
"int_factor": 10
})
runner.add_trial(trial)
trial.status = Trial.RUNNING
self.assertEqual(
pbt.on_trial_result(runner, trial, result(10, 50 * i)),
TrialScheduler.CONTINUE)
pbt.reset_stats()
return pbt, runner
def testCheckpointsMostPromisingTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(15, 200)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 0)
# checkpoint: both past interval and upper quantile
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, 200)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [200, 50, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 1)
self.assertEqual(
pbt.on_trial_result(runner, trials[1], result(30, 201)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [200, 201, 100, 150, 200])
self.assertEqual(pbt._num_checkpoints, 2)
# not upper quantile any more
self.assertEqual(
pbt.on_trial_result(runner, trials[4], result(30, 199)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt._num_checkpoints, 2)
self.assertEqual(pbt._num_perturbations, 0)
def testPerturbsLowPerformingTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
# no perturbation: haven't hit next perturbation interval
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(15, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
self.assertTrue("@perturbed" not in trials[0].experiment_tag)
self.assertEqual(pbt._num_perturbations, 0)
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [-100, 50, 100, 150, 200])
self.assertTrue("@perturbed" in trials[0].experiment_tag)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertEqual(pbt._num_perturbations, 1)
# also perturbed
self.assertEqual(
pbt.on_trial_result(runner, trials[2], result(20, 40)),
TrialScheduler.CONTINUE)
self.assertEqual(pbt.last_scores(trials), [-100, 50, 40, 150, 200])
self.assertEqual(pbt._num_perturbations, 2)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertTrue("@perturbed" in trials[2].experiment_tag)
def testPerturbWithoutResample(self):
pbt, runner = self.basicSetup(resample_prob=0.0)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertIn(trials[0].config["id_factor"], [100])
self.assertIn(trials[0].config["float_factor"], [2.4, 1.6])
self.assertEqual(type(trials[0].config["float_factor"]), float)
self.assertIn(trials[0].config["int_factor"], [8, 12])
self.assertEqual(type(trials[0].config["int_factor"]), int)
self.assertEqual(trials[0].config["const_factor"], 3)
def testPerturbWithResample(self):
pbt, runner = self.basicSetup(resample_prob=1.0)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
self.assertEqual(trials[0].config["id_factor"], 100)
self.assertEqual(trials[0].config["float_factor"], 100.0)
self.assertEqual(type(trials[0].config["float_factor"]), float)
self.assertEqual(trials[0].config["int_factor"], 10)
self.assertEqual(type(trials[0].config["int_factor"]), int)
self.assertEqual(trials[0].config["const_factor"], 3)
def testPerturbationValues(self):
def assertProduces(fn, values):
random.seed(0)
seen = set()
for _ in range(100):
seen.add(fn()["v"])
self.assertEqual(seen, values)
# Categorical case
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 8})
assertProduces(
lambda: explore({"v": 3}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 4})
assertProduces(
lambda: explore({"v": 10}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{8, 10})
assertProduces(
lambda: explore({"v": 7}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
{3, 4, 8, 10})
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 1.0, lambda x: x),
{3, 4, 8, 10})
# Continuous case
assertProduces(
lambda: explore(
{"v": 100}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
{80, 120})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
{80.0, 120.0})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 1.0,
lambda x: x),
{10.0, 100.0})
def testYieldsTimeToOtherTrials(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
trials[0].status = Trial.PENDING # simulate not enough resources
self.assertEqual(
pbt.on_trial_result(runner, trials[1], result(20, 1000)),
TrialScheduler.PAUSE)
self.assertEqual(pbt.last_scores(trials), [0, 1000, 100, 150, 200])
self.assertEqual(pbt.choose_trial_to_run(runner), trials[0])
def testSchedulesMostBehindTrialToRun(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
pbt.on_trial_result(runner, trials[0], result(800, 1000))
pbt.on_trial_result(runner, trials[1], result(700, 1001))
pbt.on_trial_result(runner, trials[2], result(600, 1002))
pbt.on_trial_result(runner, trials[3], result(500, 1003))
pbt.on_trial_result(runner, trials[4], result(700, 1004))
self.assertEqual(pbt.choose_trial_to_run(runner), None)
for i in range(5):
trials[i].status = Trial.PENDING
self.assertEqual(pbt.choose_trial_to_run(runner), trials[3])
def testPerturbationResetsLastPerturbTime(self):
pbt, runner = self.basicSetup()
trials = runner.get_trials()
pbt.on_trial_result(runner, trials[0], result(10000, 1005))
pbt.on_trial_result(runner, trials[1], result(10000, 1004))
pbt.on_trial_result(runner, trials[2], result(600, 1003))
self.assertEqual(pbt._num_perturbations, 0)
pbt.on_trial_result(runner, trials[3], result(500, 1002))
self.assertEqual(pbt._num_perturbations, 1)
pbt.on_trial_result(runner, trials[3], result(600, 100))
self.assertEqual(pbt._num_perturbations, 1)
pbt.on_trial_result(runner, trials[3], result(11000, 100))
self.assertEqual(pbt._num_perturbations, 2)
def testPostprocessingHook(self):
def explore(new_config):
new_config["id_factor"] = 42
new_config["float_factor"] = 43
return new_config
pbt, runner = self.basicSetup(resample_prob=0.0, explore=explore)
trials = runner.get_trials()
self.assertEqual(
pbt.on_trial_result(runner, trials[0], result(20, -100)),
TrialScheduler.CONTINUE)
self.assertEqual(trials[0].config["id_factor"], 42)
self.assertEqual(trials[0].config["float_factor"], 43)
class AsyncHyperBandSuite(unittest.TestCase):
    """Tests for the AsyncHyperBandScheduler (ASHA) stopping behaviour."""

    def setUp(self):
        ray.init()

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def basicSetup(self, scheduler):
        """Register two trials and feed them a fixed result history.

        t1 reports scores 0..900 over 10 steps; t2 reports a constant
        450 over 5 steps.  Both must be allowed to CONTINUE throughout.
        """
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        scheduler.on_trial_add(None, t1)
        scheduler.on_trial_add(None, t2)
        for i in range(10):
            self.assertEqual(
                scheduler.on_trial_result(None, t1, result(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                scheduler.on_trial_result(None, t2, result(i, 450)),
                TrialScheduler.CONTINUE)
        return t1, t2

    def testAsyncHBOnComplete(self):
        """Completed trials' final results raise the bar for others."""
        scheduler = AsyncHyperBandScheduler(max_t=10, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        # t3 finishes with a very high score; t2's poor late result
        # now falls below the cutoff and is stopped.
        scheduler.on_trial_complete(None, t3, result(10, 1000))
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result(101, 0)),
            TrialScheduler.STOP)

    def testAsyncHBGracePeriod(self):
        """No trial is stopped before the grace period has elapsed."""
        scheduler = AsyncHyperBandScheduler(
            grace_period=2.5, reduction_factor=3, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        scheduler.on_trial_complete(None, t1, result(10, 1000))
        scheduler.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        # Times 1 and 2 are inside grace_period=2.5 -> must continue.
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(1, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(2, 10)),
            TrialScheduler.CONTINUE)
        # Time 3 is past the grace period; the weak score is cut.
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(3, 10)),
            TrialScheduler.STOP)

    def testAsyncHBAllCompletes(self):
        """Uniformly bad trials are all stopped once they reach max_t."""
        scheduler = AsyncHyperBandScheduler(max_t=10, brackets=10)
        trials = [Trial("PPO") for i in range(10)]
        for t in trials:
            scheduler.on_trial_add(None, t)
        for t in trials:
            self.assertEqual(
                scheduler.on_trial_result(None, t, result(10, -2)),
                TrialScheduler.STOP)

    def testAsyncHBUsesPercentile(self):
        """Results below the percentile cutoff are stopped immediately."""
        scheduler = AsyncHyperBandScheduler(
            grace_period=1, max_t=10, reduction_factor=2, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        scheduler.on_trial_complete(None, t1, result(10, 1000))
        scheduler.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        # 260 is below the cutoff set by the two completed trials.
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.STOP)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.STOP)

    def testAlternateMetrics(self):
        """Custom time_attr/reward_attr keys are honoured by ASHA."""
        def result2(t, rew):
            # Result dict keyed by the alternate attribute names.
            return dict(training_iteration=t, neg_mean_loss=rew)

        scheduler = AsyncHyperBandScheduler(
            grace_period=1,
            time_attr='training_iteration',
            reward_attr='neg_mean_loss',
            brackets=1)
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        scheduler.on_trial_add(None, t1)
        scheduler.on_trial_add(None, t2)
        for i in range(10):
            self.assertEqual(
                scheduler.on_trial_result(None, t1, result2(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                scheduler.on_trial_result(None, t2, result2(i, 450)),
                TrialScheduler.CONTINUE)
        scheduler.on_trial_complete(None, t1, result2(10, 1000))
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result2(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result2(6, 0)),
            TrialScheduler.CONTINUE)
if __name__ == "__main__":
unittest.main(verbosity=2)
| true | true |
1c32f8b1da1b45f5c2b37ac0c668b07eb2a168b1 | 3,197 | py | Python | xmodaler/utils/colormap.py | cclauss/xmodaler | 1368fba6c550e97008628edbf01b59a0a6c8fde5 | [
"Apache-2.0"
] | 830 | 2021-06-26T07:16:33.000Z | 2022-03-25T10:31:32.000Z | xmodaler/utils/colormap.py | kevinjunwei/xmodaler | 3e128a816876988c5fb07d842fde4a140e699dde | [
"Apache-2.0"
] | 28 | 2021-08-19T12:39:02.000Z | 2022-03-14T13:04:19.000Z | xmodaler/utils/colormap.py | kevinjunwei/xmodaler | 3e128a816876988c5fb07d842fde4a140e699dde | [
"Apache-2.0"
] | 85 | 2021-08-15T06:58:29.000Z | 2022-02-19T07:30:56.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
"""
An awesome colormap for really neat visualizations.
Copied from Detectron, and removed gray colors.
"""
import numpy as np
__all__ = ["colormap", "random_color"]
# fmt: off
# RGB:
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
# fmt: on
def colormap(rgb=False, maximum=255):
    """
    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
    """
    assert maximum in [255, 1], maximum
    scaled = _COLORS * maximum
    # _COLORS is stored as RGB; reverse the channel axis for BGR output.
    return scaled if rgb else scaled[:, ::-1]
def random_color(rgb=False, maximum=255):
    """
    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1

    Returns:
        ndarray: a vector of 3 numbers
    """
    # Pick one row of the palette uniformly at random and scale it.
    choice = _COLORS[np.random.randint(0, len(_COLORS))] * maximum
    return choice if rgb else choice[::-1]
import numpy as np
__all__ = ["colormap", "random_color"]
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
def colormap(rgb=False, maximum=255):
    """Return the Nx3 float32 palette scaled to *maximum*, BGR unless rgb=True."""
    assert maximum in [255, 1], maximum
    c = _COLORS * maximum
    if not rgb:
        c = c[:, ::-1]  # reverse channel order: stored RGB -> BGR
    return c
def random_color(rgb=False, maximum=255):
    """Return one random palette color (3-vector), BGR unless rgb=True."""
    idx = np.random.randint(0, len(_COLORS))
    ret = _COLORS[idx] * maximum
    if not rgb:
        ret = ret[::-1]  # stored RGB -> BGR
    return ret
1c32f8fbb92ee5978bbd98488f6b44b6b6a6c68b | 1,619 | py | Python | pages/migrations/0001_initial.py | mikehagquist/fitapps3 | 43b46cd475e635fd9ab9542960a65197941a0172 | [
"MIT"
] | null | null | null | pages/migrations/0001_initial.py | mikehagquist/fitapps3 | 43b46cd475e635fd9ab9542960a65197941a0172 | [
"MIT"
] | 3 | 2020-02-11T23:58:50.000Z | 2021-06-10T21:32:19.000Z | pages/migrations/0001_initial.py | mikehagquist/fitapps3 | 43b46cd475e635fd9ab9542960a65197941a0172 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2019-03-21 11:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration for the ``pages`` app.

    Creates:
      * ``Goals``   -- a named weight goal with start/end dates and weights.
      * ``Profile`` -- per-user profile (height, birthdate) linked to the
        project's user model.
    then adds ``Goals.profile`` as a foreign key to ``Profile``.
    """

    initial = True

    dependencies = [
        # Profile.person references the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Goals',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('goalname', models.CharField(max_length=60)),
                ('startdate', models.DateField()),
                ('enddate', models.DateField()),
                ('startingweight', models.SmallIntegerField()),
                ('targetweight', models.SmallIntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('height', models.SmallIntegerField()),
                ('birthdate', models.DateField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Added after both models exist so the FK target is resolvable.
        migrations.AddField(
            model_name='goals',
            name='profile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.Profile'),
        ),
    ]
| 35.977778 | 120 | 0.594194 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Goals',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('goalname', models.CharField(max_length=60)),
('startdate', models.DateField()),
('enddate', models.DateField()),
('startingweight', models.SmallIntegerField()),
('targetweight', models.SmallIntegerField()),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('height', models.SmallIntegerField()),
('birthdate', models.DateField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='goals',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.Profile'),
),
]
| true | true |
1c32fad27afe9a2b54ac92330f5bf52a9aa3472c | 103 | py | Python | src/config_mnist_train.py | RaulRC/genetic-neural-optimizer | fa169cdc9b43c58470c3e7a7214185d56e61579a | [
"MIT"
] | 1 | 2021-04-30T09:07:15.000Z | 2021-04-30T09:07:15.000Z | src/config_mnist_train.py | RaulRC/genetic-neural-optimizer | fa169cdc9b43c58470c3e7a7214185d56e61579a | [
"MIT"
] | 5 | 2020-01-28T23:00:10.000Z | 2022-02-10T00:16:05.000Z | src/config_mnist_train.py | RaulRC/genetic-neural-optimizer | fa169cdc9b43c58470c3e7a7214185d56e61579a | [
"MIT"
] | null | null | null | iterations = 5
generations_list = [500]
populations_list = [6]
elitism_list = [0.5]
mutables_list = [1] | 20.6 | 24 | 0.728155 | iterations = 5
generations_list = [500]
populations_list = [6]
elitism_list = [0.5]
mutables_list = [1] | true | true |
1c32fbac352e7981530d8bb51524d93d32c8814e | 41,572 | py | Python | pydl_image_encoders/library/tf/imagenet_utils.py | chen0040/pydl-image-encoders | 73c0fb02842fabf824fe78735a180dcc0570624d | [
"MIT"
] | 1 | 2018-03-18T21:46:33.000Z | 2018-03-18T21:46:33.000Z | pydl_image_encoders/library/tf/imagenet_utils.py | chen0040/pydl-image-encoders | 73c0fb02842fabf824fe78735a180dcc0570624d | [
"MIT"
] | null | null | null | pydl_image_encoders/library/tf/imagenet_utils.py | chen0040/pydl-image-encoders | 73c0fb02842fabf824fe78735a180dcc0570624d | [
"MIT"
] | 1 | 2018-03-18T21:46:34.000Z | 2018-03-18T21:46:34.000Z | imagenet_labels = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
| 41.53047 | 141 | 0.500674 | imagenet_labels = {0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
999: 'toilet tissue, toilet paper, bathroom tissue'}
| true | true |
1c32fdb227564e56f103f322d9d23eee416880fb | 14,455 | py | Python | src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 3 | 2021-04-19T08:26:46.000Z | 2022-03-22T20:17:17.000Z | src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 21 | 2020-11-13T19:01:01.000Z | 2022-02-27T09:12:51.000Z | src/sdk/pynni/nni/compression/torch/utils/shape_dependency.py | CuriousCat-7/nni | 66057ba74c5252e38a576712b59a8bf867e2d514 | [
"MIT"
] | 3 | 2020-10-23T02:53:47.000Z | 2020-11-15T22:05:09.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import csv
import logging
__all__ = ['ChannelDependency', 'GroupDependency', 'CatPaddingDependency']
# TorchScript kinds of the operations that can introduce shape dependencies
CONV_TYPE = 'aten::_convolution'
ADD_TYPES = ['aten::add', 'aten::add_']
CAT_TYPE = 'aten::cat'
# module-level logger for the shape-dependency utilities
logger = logging.getLogger('Shape_Dependency')
class Dependency:
    """Base class: trace the model into a graph, then derive a dependency map."""

    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Build the graph for the model and populate ``self.dependency``.
        """
        from nni._graph_utils import TorchModuleGraph
        if traced_model is None:
            # Without a pre-traced graph we must trace the model ourselves,
            # which requires both the model and an example input.
            assert model is not None and dummy_input is not None
        self.graph = TorchModuleGraph(model, dummy_input, traced_model)
        self.dependency = {}
        self.build_dependency()

    def build_dependency(self):
        # subclasses fill self.dependency here
        raise NotImplementedError

    def export(self, filepath):
        # subclasses write their dependency map to a file here
        raise NotImplementedError
class ChannelDependency(Dependency):
    """Finds sets of conv layers whose output-channel counts must stay equal."""

    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Analyze the channel dependencies between the conv layers in a model.

        Parameters
        ----------
        model : torch.nn.Module
            The model to be analyzed.
        dummy_input : torch.Tensor
            The example input data to trace the network architecture.
        traced_model : torch._C.Graph
            If we already have the traced graph of the target model, the
            model does not need to be traced again.
        """
        super(ChannelDependency, self).__init__(model, dummy_input, traced_model)

    def _get_parent_layers(self, node):
        """
        Find the nearest ancestor conv/linear layers for the target node.

        Parameters
        ----------
        node : torch._C.Node
            The target node.

        Returns
        -------
        list
            Names of the nearest ancestor Conv2d/Linear layers.
        """
        located = []
        pending = [node]
        # breadth-first traversal towards the graph inputs
        while pending:
            current = pending.pop(0)
            if current.op_type in ('Conv2d', 'Linear'):
                # stop at the first conv/linear met on this path
                located.append(current.name)
            else:
                predecessor_names = self.graph.find_predecessors(current.unique_name)
                pending.extend(self.graph.name_to_node[name]
                               for name in predecessor_names)
        return located

    def build_dependency(self):
        """
        Build the channel dependency map for the conv layers in the model.
        """
        # unpack tuples/lists manually before analyzing channel dependencies
        self.graph.unpack_manually()
        for node in self.graph.nodes_py.nodes_op:
            sources = []
            if node.op_type in ADD_TYPES:
                # element-wise add forces its inputs to share channel counts
                sources = self._get_parent_layers(node)
            elif node.op_type == CAT_TYPE:
                # A cat introduces a channel dependency only when it does NOT
                # concatenate along the channel axis (dim 1). The dim argument
                # must be read from the cpp nodes, because TorchModuleGraph
                # merges the important node with its adjacent unimportant ones
                # (e.g. prim::attr nodes) into one NodePyGroup.
                cat_dim = None
                for cnode in node.node_cpps:
                    if cnode.kind() == CAT_TYPE:
                        cat_dim = list(cnode.inputs())[1].toIValue()
                        break
                if cat_dim != 1:
                    sources = self._get_parent_layers(node)
            merged = set(sources)
            # union with any dependency sets the sources already belong to
            for layer in sources:
                merged.update(self.dependency.get(layer, ()))
            # point every member at the shared, merged set
            for layer in merged:
                self.dependency[layer] = merged

    def export(self, filepath):
        """
        Export the channel dependencies as a csv file.

        The layers on the same line have output channel dependencies with
        each other: their output channel (filter) counts must stay equal,
        otherwise the model may have a shape conflict after pruning.

        Output example:
        Dependency Set,Layers
        Set 1,layer1.1.conv2,layer1.0.conv2,conv1
        Set 2,layer1.0.conv1
        Set 3,layer1.1.conv1
        """
        header = ['Dependency Set', 'Layers']
        set_count = 0
        emitted = set()
        with open(filepath, 'w') as out_file:
            writer = csv.writer(out_file, delimiter=',')
            writer.writerow(header)
            for node in self.graph.nodes_py.nodes_op:
                if node.op_type != 'Conv2d' or node in emitted:
                    continue
                set_count += 1
                row = ['Set %d' % set_count]
                if node.name in self.dependency:
                    for other in self.dependency[node.name]:
                        emitted.add(self.graph.name_to_node[other])
                        row.append(other)
                else:
                    # a conv layer with no recorded dependency is its own set
                    emitted.add(node)
                    row.append(node.name)
                writer.writerow(row)

    @property
    def dependency_sets(self):
        """
        Get the list of dependency sets.

        Returns
        -------
        list
            List of the dependency sets. For example,
            [set(['conv1', 'conv2']), set(['conv3', 'conv4'])]
        """
        all_sets = []
        emitted = set()
        for node in self.graph.nodes_py.nodes_op:
            if node.op_type != 'Conv2d' or node in emitted:
                continue
            current = set()
            if node.name in self.dependency:
                for other in self.dependency[node.name]:
                    emitted.add(self.graph.name_to_node[other])
                    current.add(other)
            else:
                emitted.add(node)
                current.add(node.name)
            all_sets.append(current)
        return all_sets
class CatPaddingDependency(ChannelDependency):
    """Records which layers feed the same ``cat`` operation.

    When the output features of several layers are stitched together by a
    cat operation, those layers have a cat padding dependency: inferring
    the cat mask requires the masks of all the cat's inputs, so the source
    of every input vector of a cat operation must be known.
    """

    def __init__(self, model=None, dummy_input=None, traced_model=None):
        super(CatPaddingDependency, self).__init__(model, dummy_input, traced_model)

    def build_dependency(self):
        """
        Group every layer whose output feeds the same cat operation into a
        shared dependency set.
        """
        for node in self.graph.nodes_py.nodes_op:
            parent_layers = []
            if node.op_type == CAT_TYPE:
                parent_layers = self._get_parent_layers(node)
            dependency_set = set(parent_layers)
            # merge with the dependency sets the parents already belong to
            for parent in parent_layers:
                if parent in self.dependency:
                    dependency_set.update(self.dependency[parent])
            # save the shared set for every member
            for _node in dependency_set:
                self.dependency[_node] = dependency_set

    @property
    def dependency_sets(self):
        """
        Return the list of dependency sets, each set appearing exactly once.

        Returns
        -------
        list
            A list of sets; the layers in each set feed the same cat op(s).
        """
        d_sets = []
        visited = set()
        for nodename in self.dependency:
            if nodename in visited:
                continue
            # Mark every member of this set as visited so the shared set is
            # emitted only once. (Bug fix: `visited` was never updated, so
            # each set was duplicated once per member, both here and in the
            # exported csv.)
            visited.update(self.dependency[nodename])
            d_sets.append(self.dependency[nodename])
        return d_sets

    def export(self, filepath):
        """
        Export the dependencies into a file.
        In the output file, each line contains a set of layers
        whose output features are stitched together by the cat
        operation.

        output example:
        Dependency Set, Layers
        set1, Conv1, Conv2
        set2, Conv3, Conv4
        """
        header = ['Dependency Set', 'Layers']
        setid = 0
        with open(filepath, 'w') as csvf:
            csv_w = csv.writer(csvf, delimiter=',')
            csv_w.writerow(header)
            for layers in self.dependency_sets:
                setid += 1
                row = ['Set %d' % setid]
                row.extend(list(layers))
                csv_w.writerow(row)
class GroupDependency(Dependency):
    """Computes, per conv layer, the group count its filter number must respect."""

    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Analyze the group dependencies between the conv layers in a model.

        Parameters
        ----------
        model : torch.nn.Module
            The model to be analyzed.
        dummy_input : torch.Tensor
            The example input data to trace the network architecture.
        traced_model : torch._C.Graph
            If we already have the traced graph of the target model, the
            model does not need to be traced again.
        """
        super(GroupDependency, self).__init__(model, dummy_input, traced_model)

    def _get_parent_convs(self, node):
        """
        Find the nearest ancestor conv layers for the target node.

        Group dependency only exists between conv layers, so only conv
        ancestors are collected.

        Parameters
        ----------
        node : torch._C.Node
            The target node (itself a conv node).

        Returns
        -------
        list
            Names of the nearest ancestor Conv2d layers.
        """
        located = []
        # start from the predecessors: the input node itself is a conv node
        pending = [self.graph.name_to_node[name]
                   for name in self.graph.find_predecessors(node.unique_name)]
        while pending:
            current = pending.pop(0)
            if current.op_type == 'Conv2d':
                # stop at the first conv met on this path
                located.append(current.name)
            else:
                predecessor_names = self.graph.find_predecessors(current.unique_name)
                pending.extend(self.graph.name_to_node[name]
                               for name in predecessor_names)
        return located

    def _get_conv_groups(self, node_group):
        """
        Get the number of groups of a convolutional layer.

        Parameters
        ----------
        node_group : NodePyGroup
            The target node.

        Returns
        -------
        int
            The group count of the target conv layer.
        """
        conv_cpps = [cpp for cpp in node_group.node_cpps if cpp.kind() == CONV_TYPE]
        assert len(conv_cpps) == 1
        # the group count is the 9th input parameter of aten::_convolution
        return list(conv_cpps[0].inputs())[8].toIValue()

    def build_dependency(self):
        """
        Compute the required group count of each conv layer.

        Note that the recorded count may be larger than a layer's original
        group attribute: the input channels of a grouped conv are also split
        into groups, so the filters of its parent conv layers must remain
        divisible into the same number of groups after pruning. For example,
        with conv1(group=2) feeding conv2(group=4), the filters of conv1
        must still divide into 4 groups after filter pruning, because the
        input channels of conv2 are split into 4 groups.

        Returns
        -------
        dict
            key: conv layer name; value: the minimum value that the number
            of its filters must be divisible by.
        """
        for node in self.graph.nodes_py.nodes_op:
            if node.op_type != 'Conv2d':
                continue
            group = self._get_conv_groups(node)
            # a grouped conv requires its own output channel count to be
            # divisible by the group count
            self.dependency[node.name] = max(
                self.dependency.get(node.name, group), group)
            if group > 1:
                # it also requires the output channels of its parent conv
                # layers to be divisible by the group count
                for parent in self._get_parent_convs(node):
                    self.dependency[parent] = max(
                        self.dependency.get(parent, group), group)
        return self.dependency

    def export(self, filepath):
        """
        Export the group dependency to a csv file.

        Each line describes one conv layer: its Pytorch module name and the
        group count its filters must respect (which may be larger than the
        layer's original group attribute).

        output example:
        Conv layer, Groups
        Conv1, 1
        Conv2, 2
        Conv3, 4
        """
        header = ['Conv Layer Name', 'Group']
        with open(filepath, 'w') as out_file:
            writer = csv.writer(out_file, delimiter=',')
            writer.writerow(header)
            for name, group in self.dependency.items():
                writer.writerow([name, group])
| 37.643229 | 97 | 0.576825 |
import csv
import logging
__all__ = ['ChannelDependency', 'GroupDependency', 'CatPaddingDependency']
# TorchScript kinds of the operations that can introduce shape dependencies
CONV_TYPE = 'aten::_convolution'
ADD_TYPES = ['aten::add', 'aten::add_']
CAT_TYPE = 'aten::cat'
# module-level logger for the shape-dependency utilities
logger = logging.getLogger('Shape_Dependency')
class Dependency:
    """Base class: trace the model into a graph, then derive a dependency map."""
    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Build the graph for the model and populate ``self.dependency``.
        """
        from nni._graph_utils import TorchModuleGraph
        # without a pre-traced graph, the user must provide model &
        # dummy_input so the model can be traced here
        if traced_model is None:
            assert model is not None and dummy_input is not None
        self.graph = TorchModuleGraph(model, dummy_input, traced_model)
        self.dependency = dict()
        self.build_dependency()
    def build_dependency(self):
        # to be implemented by subclasses
        raise NotImplementedError
    def export(self, filepath):
        # to be implemented by subclasses
        raise NotImplementedError
class ChannelDependency(Dependency):
    """Finds sets of conv layers whose output-channel counts must stay equal."""
    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Parameters
        ----------
        model : torch.nn.Module
            The model to be analyzed.
        dummy_input : torch.Tensor
            The example input data to trace the network architecture.
        traced_model : torch._C.Graph
            An already-traced graph of the target model, so the model does
            not need to be traced again.
        """
        super(ChannelDependency, self).__init__(model, dummy_input, traced_model)
    def _get_parent_layers(self, node):
        """
        Find the nearest ancestor conv/linear layers for the target node.

        Parameters
        ----------
        node : torch._C.Node
            The target node.

        Returns
        -------
        list
            Names of the nearest ancestor Conv2d/Linear layers.
        """
        parent_layers = []
        queue = []
        queue.append(node)
        # breadth-first traversal towards the graph inputs
        while queue:
            curnode = queue.pop(0)
            if curnode.op_type == 'Conv2d' or curnode.op_type == 'Linear':
                # stop at the first conv/linear met on this path
                parent_layers.append(curnode.name)
                continue
            parents = self.graph.find_predecessors(curnode.unique_name)
            parents = [self.graph.name_to_node[name] for name in parents]
            for parent in parents:
                queue.append(parent)
        return parent_layers
    def build_dependency(self):
        """
        Build the channel dependency map for the conv layers in the model.
        """
        # unpack tuples/lists manually before analyzing channel dependencies
        self.graph.unpack_manually()
        for node in self.graph.nodes_py.nodes_op:
            parent_layers = []
            # add and cat are the operations that create channel dependencies
            if node.op_type in ADD_TYPES:
                parent_layers = self._get_parent_layers(node)
            elif node.op_type == CAT_TYPE:
                # A cat introduces a channel dependency only when it does not
                # concatenate along the channel axis (dim 1); the dim argument
                # is read from the cpp nodes merged into this NodePyGroup.
                cat_dim = None
                for cnode in node.node_cpps:
                    if cnode.kind() == CAT_TYPE:
                        cat_dim = list(cnode.inputs())[1].toIValue()
                        break
                if cat_dim != 1:
                    parent_layers = self._get_parent_layers(node)
            dependency_set = set(parent_layers)
            # merge with the dependency sets the parents already belong to
            for parent in parent_layers:
                if parent in self.dependency:
                    dependency_set.update(self.dependency[parent])
            # save the shared set for every member
            for _node in dependency_set:
                self.dependency[_node] = dependency_set
    def export(self, filepath):
        """
        Export the channel dependencies as a csv file. Layers on the same
        line have output channel dependencies with each other, i.e. their
        output channel (filter) counts must stay equal.
        """
        header = ['Dependency Set', 'Layers']
        setid = 0
        visited = set()
        with open(filepath, 'w') as csvf:
            csv_w = csv.writer(csvf, delimiter=',')
            csv_w.writerow(header)
            for node in self.graph.nodes_py.nodes_op:
                if node.op_type != 'Conv2d' or node in visited:
                    continue
                setid += 1
                row = ['Set %d' % setid]
                if node.name not in self.dependency:
                    # a conv layer without dependencies forms its own set
                    visited.add(node)
                    row.append(node.name)
                else:
                    for other in self.dependency[node.name]:
                        visited.add(self.graph.name_to_node[other])
                        row.append(other)
                csv_w.writerow(row)
    @property
    def dependency_sets(self):
        """
        Get the list of dependency sets, e.g.
        [set(['conv1', 'conv2']), set(['conv3', 'conv4'])]
        """
        d_sets = []
        visited = set()
        for node in self.graph.nodes_py.nodes_op:
            if node.op_type != 'Conv2d' or node in visited:
                continue
            tmp_set = set()
            if node.name not in self.dependency:
                visited.add(node)
                tmp_set.add(node.name)
            else:
                for other in self.dependency[node.name]:
                    visited.add(self.graph.name_to_node[other])
                    tmp_set.add(other)
            d_sets.append(tmp_set)
        return d_sets
class CatPaddingDependency(ChannelDependency):
    # Records which layers feed the same `cat` operation; all the inputs of
    # a cat must be known when the cat mask is inferred later.
    def __init__(self, model=None, dummy_input=None, traced_model=None):
        super(CatPaddingDependency, self).__init__(model, dummy_input, traced_model)
    def build_dependency(self):
        """Group every layer whose output feeds the same cat operation."""
        for node in self.graph.nodes_py.nodes_op:
            parent_layers = []
            if node.op_type == CAT_TYPE:
                parent_layers = self._get_parent_layers(node)
            dependency_set = set(parent_layers)
            # merge with the dependency sets the parents already belong to
            for parent in parent_layers:
                if parent in self.dependency:
                    dependency_set.update(self.dependency[parent])
            for _node in dependency_set:
                self.dependency[_node] = dependency_set
    @property
    def dependency_sets(self):
        """Return the list of dependency sets, each appearing exactly once."""
        d_sets = []
        visited = set()
        for nodename in self.dependency:
            if nodename in visited:
                continue
            # Mark all members so the shared set is appended only once.
            # (Bug fix: `visited` was never updated, which duplicated each
            # set once per member, both here and in the exported csv.)
            visited.update(self.dependency[nodename])
            d_sets.append(self.dependency[nodename])
        return d_sets
    def export(self, filepath):
        """Write one csv row per dependency set: 'Set N', layer, layer, ..."""
        header = ['Dependency Set', 'Layers']
        setid = 0
        with open(filepath, 'w') as csvf:
            csv_w = csv.writer(csvf, delimiter=',')
            csv_w.writerow(header)
            for layers in self.dependency_sets:
                setid += 1
                row = ['Set %d' % setid]
                row.extend(list(layers))
                csv_w.writerow(row)
class GroupDependency(Dependency):
    """Computes, per conv layer, the group count its filter number must respect."""
    def __init__(self, model=None, dummy_input=None, traced_model=None):
        """
        Parameters
        ----------
        model : torch.nn.Module
            The model to be analyzed.
        dummy_input : torch.Tensor
            The example input data to trace the network architecture.
        traced_model : torch._C.Graph
            An already-traced graph of the target model, so the model does
            not need to be traced again.
        """
        super(GroupDependency, self).__init__(model, dummy_input, traced_model)
    def _get_parent_convs(self, node):
        """
        Find the nearest ancestor conv layers of the target node. Group
        dependency only exists between conv layers, so only conv ancestors
        are collected.
        """
        parent_layers = []
        # start from the predecessors: the input node itself is a conv node
        predeessors = self.graph.find_predecessors(node.unique_name)
        predeessors = [self.graph.name_to_node[x] for x in predeessors]
        queue = predeessors
        while queue:
            curnode = queue.pop(0)
            if curnode.op_type == 'Conv2d':
                # stop at the first conv met on this path
                parent_layers.append(curnode.name)
                continue
            parents = self.graph.find_predecessors(curnode.unique_name)
            parents = [self.graph.name_to_node[name] for name in parents]
            for parent in parents:
                queue.append(parent)
        return parent_layers
    def _get_conv_groups(self, node_group):
        """
        Return the group count of the conv layer wrapped by ``node_group``.
        """
        cpp_conv = list(filter(lambda x: x.kind() == CONV_TYPE, node_group.node_cpps))
        assert len(cpp_conv) == 1
        cpp_conv = cpp_conv[0]
        inputs = list(cpp_conv.inputs())
        # the group count is the 9th input parameter of aten::_convolution
        group = inputs[8].toIValue()
        return group
    def build_dependency(self):
        """
        Compute the required group count of each conv layer. The recorded
        count may be larger than a layer's original group attribute, because
        the input channels of a grouped child conv are also split into
        groups, constraining the parent's output channels.

        Returns
        -------
        dict
            key: conv layer name; value: the minimum value that the number
            of its filters must be divisible by.
        """
        for node in self.graph.nodes_py.nodes_op:
            if node.op_type == 'Conv2d':
                group = self._get_conv_groups(node)
                if node.name in self.dependency:
                    # keep the largest requirement seen so far
                    self.dependency[node.name] = max(self.dependency[node.name], group)
                else:
                    self.dependency[node.name] = group
                if group > 1:
                    # for the conv layer whose group is larger than 1, it will require the number
                    # of output channels of their parent conv layer to be divisible by group.
                    parent_convs = self._get_parent_convs(node)
                    for parent in parent_convs:
                        if parent in self.dependency:
                            self.dependency[parent] = max(self.dependency[parent], group)
                        else:
                            self.dependency[parent] = group
        return self.dependency
    def export(self, filepath):
        """
        Export the group dependency to a csv file: one line per conv layer,
        giving the layer name and its required group count.
        """
        header = ['Conv Layer Name', 'Group']
        with open(filepath, 'w') as csvf:
            csv_w = csv.writer(csvf, delimiter=',')
            csv_w.writerow(header)
            for name in self.dependency:
                group = self.dependency[name]
                csv_w.writerow([name, group])
| true | true |
1c330062e0a400ccec94be55403fe68eeb51645a | 30,197 | py | Python | keras/layers/preprocessing/index_lookup.py | kanesp/keras | 7f8c62b90274f9c5a261984c098312ff8fab3d66 | [
"Apache-2.0"
] | 1 | 2021-03-16T09:37:55.000Z | 2021-03-16T09:37:55.000Z | keras/layers/preprocessing/index_lookup.py | kanesp/keras | 7f8c62b90274f9c5a261984c098312ff8fab3d66 | [
"Apache-2.0"
] | null | null | null | keras/layers/preprocessing/index_lookup.py | kanesp/keras | 7f8c62b90274f9c5a261984c098312ff8fab3d66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras index lookup preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import collections
import json
import operator
import numpy as np
from keras import backend as K
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import table_utils
from keras.utils import layer_utils
from tensorflow.python.ops import lookup_ops
# Valid values for the `output_mode` argument (validated against this set
# in IndexLookup.__init__ via layer_utils.validate_string_arg).
INT = "int"
BINARY = "binary"
COUNT = "count"
TFIDF = "tf-idf"
# Internal identifiers; presumably the names under which the vocabulary and
# idf weights are tracked — usage not visible in this chunk, TODO confirm.
_VOCAB_NAME = "vocab"
_IDF_WEIGHTS_NAME = "idf_weights"
class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Maps values from a vocabulary to integer indices.
This layer translates a set of arbitrary hashables into an integer output via
a table-based lookup, with optional out-of-vocabulary handling. This is the
basis layer for both IntegerLookup and StringLookup; it holds the common
logic but is not intended to be exported as part of the Keras API.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this size
includes the OOV and mask tokens.
num_oov_indices: The number of out-of-vocabulary tokens to use. If this
value is more than 1, OOV inputs are hashed to determine their OOV value.
If this value is 0, OOV inputs will map to -1 when `output_mode` is "int"
and are dropped otherwise.
mask_token: A token that represents masked inputs. When `output_mode` is
"int", the token is included in vocabulary and mapped to index 0. In other
output modes, the token will not appear in the vocabulary and instances
of the mask token in the input will be dropped. If set to None, no mask
term will be added.
oov_token: Only used when `invert` is True. The token to return for OOV
indices.
vocabulary: An optional list of vocabulary terms. If the list contains the
same token multiple times, an error will be thrown.
invert: Only valid when `output_mode` is "int". If True, this layer will map
indices to vocabulary items instead of mapping vocabulary items to
indices. Default to False.
output_mode: Specification for the output of the layer. Defaults to "int".
Values can be "int", "binary", "count", or "tf-idf" configuring the layer
as follows:
"int": Return the raw integer indices of the input tokens.
"binary": Outputs a single int array per sample, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the sample.
"count": Like "binary", but the int array contains a count of the number
of times the token at that index appeared in the sample.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
pad_to_max_tokens: Only valid when `output_mode` is "binary", "count", or
"tf-idf". If True, the output will have its feature axis padded to
`max_tokens` even if the number of unique tokens in the vocabulary is less
than max_tokens, resulting in a tensor of shape [batch_size, max_tokens]
regardless of vocabulary size. Defaults to False.
sparse: Boolean. Only applicable to "binary" and "count" output modes.
If True, returns a `SparseTensor` instead of a dense `Tensor`.
Defaults to False.
"""
  def __init__(self,
               max_tokens,
               num_oov_indices,
               mask_token,
               oov_token,
               vocabulary=None,
               invert=False,
               output_mode=INT,
               sparse=False,
               pad_to_max_tokens=False,
               **kwargs):
    """Creates the lookup state: validation, combiner, tables, TF-IDF weights.

    Args are documented on the class. `kwargs` may additionally carry a hidden
    "vocab_size" entry, written by `get_config`, used to restore the static
    output depth when deserializing a saved model.
    """
    # If max_tokens is set, the value must be greater than 1 - otherwise we
    # are creating a 0-element vocab, which doesn't make sense.
    if max_tokens is not None and max_tokens <= 1:
      raise ValueError("If set, `max_tokens` must be greater than 1. "
                       "You passed {}".format(max_tokens))
    if num_oov_indices < 0:
      raise ValueError("`num_oov_indices` must be greater than or equal to 0. "
                       "You passed {}".format(num_oov_indices))
    # 'output_mode' must be one of (INT, BINARY, COUNT, TFIDF)
    layer_utils.validate_string_arg(
        output_mode,
        allowable_strings=(INT, BINARY, COUNT, TFIDF),
        layer_name=self.__class__.__name__,
        arg_name="output_mode")
    # Inverse lookup (index -> token) only makes sense for raw-index output.
    if invert and output_mode != INT:
      raise ValueError("`output_mode` must be {} when `invert` is true. You "
                       "passed {}".format(INT, output_mode))
    self.invert = invert
    self.max_tokens = max_tokens
    self.num_oov_indices = num_oov_indices
    self.oov_token = oov_token
    self.mask_token = mask_token
    self.output_mode = output_mode
    self.sparse = sparse
    self.pad_to_max_tokens = pad_to_max_tokens
    self._called = False
    self._vocab_size = 0
    # We need to keep track of our current vocab size outside of our layer
    # weights to support a static output shape when `output_mode != INT`. The
    # bincount ops do not set shape on their outputs, which means we have to
    # set it ourselves. We persist the current vocab size as a hidden part of
    # the config when serializing our model.
    if "vocab_size" in kwargs:
      self._vocab_size = kwargs["vocab_size"]
      del kwargs["vocab_size"]
    # Reserve room for the mask/OOV slots when capping the combiner's vocab.
    if max_tokens is not None:
      available_vocab_size = max_tokens - self._token_start_index()
    else:
      available_vocab_size = None
    super(IndexLookup, self).__init__(
        combiner=_IndexLookupCombiner(
            vocab_size=available_vocab_size,
            mask_value=mask_token,
            oov_value=oov_token,
            compute_idf=(output_mode == TFIDF)),
        **kwargs)
    # We need to save the key dtype so that we know if we're expecting int64
    # keys. If we are, we will cast int32 inputs to int64 as well.
    if invert:
      self._key_dtype = tf.int64
      self._value_dtype = self.dtype
      self._mask_key = 0
      self._mask_value = mask_token
      default_value = self.oov_token
      oov_indices = None
    else:
      self._key_dtype = self.dtype
      self._value_dtype = tf.int64
      self._mask_key = mask_token
      # Masks should map to 0 for int output and be dropped otherwise. Max ints
      # will be dropped from the bincount op.
      self._mask_value = 0 if self.output_mode == INT else tf.int64.max
      oov_start = self._oov_start_index()
      token_start = self._token_start_index()
      if self.num_oov_indices == 0:
        # If there are no OOV indices, we map OOV tokens to -1 for int output
        # and drop them from bagged output. Max ints will be dropped from the
        # bincount op.
        default_value = -1 if self.output_mode == INT else tf.int64.max
        oov_indices = None
      elif self.num_oov_indices == 1:
        # If there is only one OOV index, we can set that index as the default
        # value of the index_lookup table.
        default_value = oov_start
        oov_indices = None
      else:
        # If we have multiple OOV values, we need to do a further hashing step;
        # to make this easier, we set the OOV value to -1. (This lets us do a
        # vectorized add and cast to boolean to determine locations where we
        # need to do extra hashing.)
        default_value = -1
        oov_indices = list(range(oov_start, token_start))
    if vocabulary is not None and isinstance(vocabulary,
                                             tf.lookup.TextFileInitializer):
      # File-backed vocabularies use an immutable static table; their size is
      # known now, so `max_tokens` is pinned to it (plus special-token slots).
      self._table = self._static_table_class()(
          vocabulary, default_value=default_value)
      self._table_handler = table_utils.TableHandler(
          table=self._table,
          mask_token=mask_token,
          oov_tokens=oov_indices,
          use_v1_apis=self._use_v1_apis())
      self.max_tokens = (
          self._table_handler.table_size() + self.num_oov_indices +
          (0 if mask_token is None else 1))
    else:
      # Otherwise the table is mutable so `set_vocabulary`/`adapt` can fill it.
      self._table = lookup_ops.MutableHashTable(
          key_dtype=self._key_dtype,
          value_dtype=self._value_dtype,
          default_value=default_value,
          name=(self._name + "_index_table"))
      self._table_handler = table_utils.TableHandler(
          table=self._table,
          oov_tokens=oov_indices,
          use_v1_apis=self._use_v1_apis())
      if vocabulary is not None:
        self.set_vocabulary(vocabulary)
    if self.output_mode == TFIDF:
      # The TF-IDF weight may have a (None,) tensorshape. This creates
      # a 1D variable with arbitrary shape, which we can assign any weight to
      # so long as it has 1 dimension. In order to properly initialize this
      # weight in Keras, we need to provide a custom callable initializer which
      # does not depend on the shape of the weight (as all other initializers
      # do) since the weight is not known. Hence the lambda shape, dtype: [0].
      if not self.pad_to_max_tokens or max_tokens is None:
        initializer = lambda shape, dtype: [0]
      else:
        initializer = tf.compat.v1.zeros_initializer
      # We are adding these here instead of in build() since they do not depend
      # on the input shape at all.
      idf_shape = (max_tokens,) if self.pad_to_max_tokens else (None,)
      self.tf_idf_weights = self._add_state_variable(
          name="idf",
          shape=tf.TensorShape(idf_shape),
          dtype=K.floatx(),
          initializer=initializer)
    tracked_table = self._add_trackable(self._table, trainable=False)
    # This is a workaround for summary() on this layer. Because the table is
    # not mutable during training, the effective number of parameters (and so
    # the weight shape) is 0; we add this as an attr so that the parameter
    # counting code in the Model object doesn't throw an attribute error.
    tracked_table.shape = tf.TensorShape((0,))
def compute_output_shape(self, input_shape):
if self.output_mode == INT:
return input_shape
if self._vocab_size and not self.pad_to_max_tokens:
out_depth = self._vocab_size
else:
out_depth = self.max_tokens
return tf.TensorShape([input_shape[0], out_depth])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = self._value_dtype if self.output_mode == INT else K.floatx()
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Args:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
"""
if not reset_state:
raise ValueError("IndexLookup does not support streaming adapts.")
super(IndexLookup, self).adapt(data, reset_state)
def get_vocabulary(self):
if self._vocab_size == 0:
return []
# The MutableHashTable data will not be sorted, so we will create a inverted
# lookup here, and use that to lookup a range of indices [0, vocab_size).
keys, values = self._table_handler.data()
if self.invert:
index_to_token = zip(keys, values)
else:
index_to_token = zip(values, keys)
lookup = collections.defaultdict(lambda: self.oov_token, index_to_token)
return [lookup[x] for x in range(self._vocab_size)]
def vocab_size(self):
return self._vocab_size
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocab_size": self._vocab_size
}
base_config = super(IndexLookup, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
  def set_vocabulary(self, vocab, idf_weights=None):
    """Sets vocabulary (and optionally document frequency) data for this layer.
    This method sets the vocabulary and idf weights for this layer directly,
    instead of analyzing a dataset through 'adapt'. It should be used whenever
    the vocab (and optionally document frequency) information is already known.
    If vocabulary data is already present in the layer, this method will replace
    it.
    Args:
      vocab: An array of hashable tokens.
      idf_weights: An array of inverse document frequency weights with equal
        length to vocab. Only necessary if the layer output_mode is TFIDF.
    Raises:
      ValueError: If there are too many inputs, the inputs do not match, or
        input data is missing.
      RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens when "binary", "count", and "tfidf" modes,
        if "pad_to_max_tokens" is False and the layer itself has already been
        called.
    """
    if self.output_mode != TFIDF and idf_weights is not None:
      raise ValueError("`idf_weights` should only be set if output_mode is "
                       "TFIDF. output_mode is {}.".format(self.output_mode))
    # Bagged output shapes are fixed at first call unless padding to
    # max_tokens, so the vocab cannot change afterwards.
    if (self.output_mode in [BINARY, COUNT, TFIDF] and self._called and
        not self.pad_to_max_tokens):
      raise RuntimeError("When using {} mode and `pad_to_max_tokens` is "
                         "False, the vocabulary cannot be changed after the "
                         "layer is called.".format(self.output_mode))
    # Check whether the passed vocab already carries the mask/OOV tokens this
    # layer reserves at its leading indices.
    oov_start = self._oov_start_index()
    token_start = self._token_start_index()
    should_have_mask = (oov_start > 0)
    has_mask = should_have_mask and vocab[0] == self.mask_token
    should_have_oov = (self.num_oov_indices > 0)
    expected_oov = [self.oov_token] * self.num_oov_indices
    found_oov = vocab[oov_start:token_start]
    has_oov = should_have_oov and found_oov == expected_oov
    # If we get a numpy array, then has_oov may end up being a numpy array
    # instead of a bool. Fix this by collapsing the variable if it's not bool.
    if not isinstance(has_oov, bool):
      has_oov = any(has_oov)
    # Special tokens must appear either together or not at all; a vocab with
    # only one of the two expected special tokens is ambiguous.
    if all([should_have_mask, has_mask, should_have_oov]) and not has_oov:
      raise ValueError(
          "Invalid vocabulary format. The layer was created with "
          "`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
          "included in the provided vocabulary. The passed vocabulary has the "
          "correct mask token `{mask}` at index 0, but does not have the OOV "
          "token `{oov}` in indices [{start}:{end}]. Instead, we found "
          "`{found}`. Was this vocabulary generated by a layer with "
          "incompatible settings?".format(
              mask=self.mask_token,
              oov=self.oov_token,
              start=oov_start,
              end=token_start,
              found=found_oov))
    if all([should_have_oov, has_oov, should_have_mask]) and not has_mask:
      raise ValueError(
          "Invalid vocabulary format. The layer was created with "
          "`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
          "included in the provided vocabulary. The passed vocabulary has the "
          "correct OOV token `{oov}` at indices [{start}:{end}], but does not "
          "have the mask token `{mask}` in index 0. Instead, we found "
          "`{found}`. Was this vocabulary generated by a layer with "
          "incompatible settings?".format(
              mask=self.mask_token,
              oov=self.oov_token,
              start=oov_start,
              end=token_start,
              found=vocab[0]))
    # When special tokens were included in the passed vocab, strip them off;
    # only the regular tokens are inserted into the table below.
    found_special_tokens = has_oov or has_mask
    if found_special_tokens:
      tokens = vocab[token_start:]
    else:
      tokens = vocab
    repeated_tokens = table_utils.find_repeated_tokens(tokens)
    if repeated_tokens:
      raise ValueError("The passed vocabulary has at least one repeated "
                       "term. Please uniquify your dataset. The repeated terms "
                       "are {}".format(repeated_tokens))
    if self.mask_token in tokens:
      raise ValueError("Reserved mask token {} was found in the passed "
                       "vocabulary at index {}. Please either remove the "
                       "reserved token from the vocabulary or change the "
                       "mask token for this layer.".format(
                           self.mask_token, tokens.index(self.mask_token)))
    if self.oov_token in tokens:
      raise ValueError("Reserved OOV token {} was found in the passed "
                       "vocabulary at index {}. Please either remove the "
                       "reserved token from the vocabulary or change the "
                       "OOV token for this layer.".format(
                           self.oov_token, tokens.index(self.oov_token)))
    self._vocab_size = token_start + len(tokens)
    if self.max_tokens is not None and self._vocab_size > self.max_tokens:
      raise ValueError(
          "Attempted to set a vocabulary larger than the maximum vocab size. "
          "Passed vocab size is {}, max vocab size is {}.".format(
              self._vocab_size, self.max_tokens))
    if self.output_mode == TFIDF:
      if idf_weights is None:
        raise ValueError("`idf_weights` must be set if output_mode is TFIDF")
      if len(vocab) != len(idf_weights):
        raise ValueError("`idf_weights` must be the same length as vocab. "
                         "len(idf_weights) is {}, len(vocab) is {}".format(
                             len(vocab), len(idf_weights)))
      idf_weights = self._convert_to_ndarray(idf_weights)
      if idf_weights.ndim != 1:
        raise ValueError(
            "TF-IDF data must be a 1-index array, but received {}".format(
                type(idf_weights)))
    # We add the non-special vocab tokens and optionally the mask_token to our
    # hash table. OOV tokens are handled with the hash table default value and
    # not added directly.
    self._table_handler.clear()
    indices = np.arange(token_start, len(tokens) + token_start, dtype=np.int64)
    if self.invert:
      self._table_handler.insert(indices, tokens)
    else:
      self._table_handler.insert(tokens, indices)
    if self.mask_token is not None:
      self._table_handler.insert([self._mask_key], [self._mask_value])
    if self.output_mode == TFIDF:
      # If the passed vocabulary has no special tokens, we need to pad the front
      # of idf_weights. We don't have real document frequencies for these tokens
      # so we will use an average of all idf_weights passed in as a reasonable
      # default.
      if found_special_tokens:
        front_padding = 0
        front_padding_value = 0
      else:
        front_padding = token_start
        front_padding_value = np.average(idf_weights)
      # If pad_to_max_tokens is true, and max_tokens is greater than our total
      # vocab size, we need to pad the back of idf_weights with zeros as well.
      back_padding_value = 0
      if self.pad_to_max_tokens and self.max_tokens is not None:
        back_padding = self.max_tokens - front_padding - len(idf_weights)
      else:
        back_padding = 0
      idf_weights = np.pad(
          idf_weights, (front_padding, back_padding),
          "constant",
          constant_values=(front_padding_value, back_padding_value))
      K.set_value(self.tf_idf_weights, idf_weights)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
self.set_vocabulary(
updates[_VOCAB_NAME], idf_weights=updates[_IDF_WEIGHTS_NAME])
  def call(self, inputs):
    """Looks up `inputs` and formats the result according to `output_mode`.

    Args:
      inputs: Tokens to look up (or indices, when the layer was built with
        `invert=True`).

    Returns:
      For "int" mode, the raw lookup result; otherwise a dense or sparse
      binary/count/tf-idf tensor with feature depth `out_depth` (see below).

    Raises:
      ValueError: If no vocabulary has been set yet.
    """
    if not self.max_tokens and not self._vocab_size:
      raise ValueError("You must set the layer's vocabulary before calling it. "
                       "Either pass a `vocabulary` argument to the layer, or "
                       "call `layer.adapt(dataset)` with some sample data.")
    self._called = True
    # Widen int32 keys when the table stores int64 keys (invert mode).
    if self._key_dtype == tf.int64 and inputs.dtype == tf.int32:
      inputs = tf.cast(inputs, tf.int64)
    lookup_result = self._table_handler.lookup(inputs)
    if self.output_mode == INT:
      return lookup_result
    binary_output = (self.output_mode == BINARY)
    # Output feature depth is the live vocab size unless padding to max_tokens.
    if self._vocab_size and not self.pad_to_max_tokens:
      out_depth = self._vocab_size
    else:
      out_depth = self.max_tokens
    if self.sparse:
      bincounts = category_encoding.sparse_bincount(lookup_result, out_depth,
                                                    binary_output)
    else:
      bincounts = category_encoding.dense_bincount(lookup_result, out_depth,
                                                   binary_output)
    # TF-IDF scales the per-token counts by the stored idf weights.
    if self.output_mode == TFIDF:
      return tf.multiply(bincounts, self.tf_idf_weights)
    return bincounts
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def _use_v1_apis(self):
return False
def _static_table_class(self):
return tf.lookup.StaticHashTable
def _oov_start_index(self):
return 1 if self.mask_token is not None and self.output_mode == INT else 0
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
class _IndexLookupAccumulator(
    collections.namedtuple("Accumulator",
                           ["data", "count_dict", "per_doc_count_dict"])):
  # Fields:
  #   data: dict of scalar bookkeeping state (holds "next_doc_id" when IDF is
  #     computed), or None.
  #   count_dict: token -> total occurrence count.
  #   per_doc_count_dict: token -> {"count", "last_doc_id"} for document
  #     frequencies, or None when IDF is not computed.
  pass
class _IndexLookupCombiner(base_preprocessing_layer.Combiner):
  """Combiner for the IndexLookup preprocessing layer.
  This class encapsulates the logic for computing a vocabulary based on the
  frequency of each token.
  Attributes:
    vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on
      frequency across the dataset) are retained in the vocabulary. If None, or
      set to a value greater than the total number of distinct tokens in the
      dataset, all tokens are retained.
  """
  def __init__(self,
               vocab_size=None,
               mask_value=None,
               oov_value=None,
               compute_idf=False):
    self._vocab_size = vocab_size
    # Special tokens are excluded from the extracted vocabulary (see extract).
    self._mask_value = mask_value
    self._oov_value = oov_value
    # When True, per-document counts are tracked so IDF weights can be
    # derived in extract().
    self._compute_idf = compute_idf
  def compute(self, values, accumulator=None):
    """Compute a step in this computation, returning a new accumulator."""
    values = base_preprocessing_layer.convert_to_list(
        values, sparse_default_value=self._mask_value)
    if accumulator is None:
      accumulator = self._create_accumulator()
    # TODO(momernick): Benchmark improvements to this algorithm.
    if not isinstance(values, list):
      values = [values]
    for document in values:
      if not isinstance(document, list):
        document = [document]
      if self._compute_idf:
        current_doc_id = accumulator.data["next_doc_id"]
        accumulator.data["next_doc_id"] += 1
      for token in document:
        accumulator.count_dict[token] += 1
        if self._compute_idf:
          doc_count = accumulator.per_doc_count_dict[token]
          # Only bump the document count once per document, however many
          # times the token repeats within it.
          if doc_count["last_doc_id"] != current_doc_id:
            doc_count["count"] += 1
            doc_count["last_doc_id"] = current_doc_id
    return accumulator
  def merge(self, accumulators):
    """Merge several accumulators to a single accumulator."""
    if not accumulators:
      return accumulators
    base_accumulator = accumulators[0]
    for accumulator in accumulators[1:]:
      for token, value in accumulator.count_dict.items():
        base_accumulator.count_dict[token] += value
      if self._compute_idf:
        base_accumulator.data["next_doc_id"] += accumulator.data["next_doc_id"]
      if self._compute_idf:
        for token, value in accumulator.per_doc_count_dict.items():
          # Any newly created token counts in 'base_accumulator''s
          # per_doc_count_dict will have a last_doc_id of -1. This is always
          # less than the next doc id (which are strictly positive), so any
          # future occurrences are guaranteed to be counted.
          base_accumulator.per_doc_count_dict[token]["count"] += value[
              "count"]
    return base_accumulator
  def extract(self, accumulator):
    """Convert an accumulator into a dict of output values.
    Args:
      accumulator: An accumulator aggregating over the full dataset.
    Returns:
      A dict of:
        "vocab": A list of the retained items in the vocabulary.
    """
    vocab_counts = accumulator.count_dict
    # Drop special tokens from our vocab.
    if self._mask_value in vocab_counts:
      del vocab_counts[self._mask_value]
    if self._oov_value in vocab_counts:
      del vocab_counts[self._oov_value]
    # Data processed by the accumulator could be tensors, numpy arrays or lists.
    # For tensor string input, values will have been converted into bytes. We
    # need to check the bytes version of special tokens in this case.
    if isinstance(self._mask_value, str):
      mask_value_bytes = tf.compat.as_bytes(self._mask_value)
      if mask_value_bytes in vocab_counts:
        del vocab_counts[mask_value_bytes]
    if isinstance(self._oov_value, str):
      oov_value_bytes = tf.compat.as_bytes(self._oov_value)
      if oov_value_bytes in vocab_counts:
        del vocab_counts[oov_value_bytes]
    # Sort by descending count, ties broken by token, then truncate to the
    # requested vocab size.
    sorted_counts = sorted(
        vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)
    vocab_data = (
        sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)
    vocab = [data[0] for data in vocab_data]
    if self._compute_idf:
      num_documents = accumulator.data["next_doc_id"]
      document_counts = accumulator.per_doc_count_dict
      doc_counts = [document_counts[token]["count"] for token in vocab]
      idf_weights = self._inverse_document_frequency(doc_counts, num_documents)
    else:
      idf_weights = None
    return {_VOCAB_NAME: vocab, _IDF_WEIGHTS_NAME: idf_weights}
  def restore(self, output):
    """Create an accumulator based on 'output'."""
    raise NotImplementedError(
        "IndexLookup does not restore or support streaming updates.")
  def serialize(self, accumulator):
    """Serialize an accumulator for a remote call."""
    output_dict = {}
    output_dict["vocab"] = list(accumulator.count_dict.keys())
    output_dict["vocab_counts"] = list(accumulator.count_dict.values())
    if self._compute_idf:
      output_dict["data"] = accumulator.data
      output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
      output_dict["idf_counts"] = [
          counter["count"]
          for counter in accumulator.per_doc_count_dict.values()
      ]
    return tf.compat.as_bytes(json.dumps(output_dict))
  def deserialize(self, encoded_accumulator):
    """Deserialize an accumulator received from 'serialize()'."""
    accumulator_dict = json.loads(tf.compat.as_text(encoded_accumulator))
    accumulator = self._create_accumulator()
    count_dict = dict(
        zip(accumulator_dict["vocab"], accumulator_dict["vocab_counts"]))
    accumulator.count_dict.update(count_dict)
    if self._compute_idf:
      accumulator.data = accumulator_dict["data"]
      # last_doc_id is reset to -1; see the note in merge() for why this is
      # safe.
      create_dict = lambda x: {"count": x, "last_doc_id": -1}
      idf_count_dicts = [
          create_dict(count) for count in accumulator_dict["idf_counts"]
      ]
      idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
      accumulator.per_doc_count_dict.update(idf_dict)
    return accumulator
  def _create_accumulator(self):
    """Accumulate a sorted array of vocab tokens and corresponding counts."""
    if self._compute_idf:
      create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
      per_doc_count_dict = collections.defaultdict(create_default_dict)
      data = {"next_doc_id": 0}
    else:
      per_doc_count_dict = None
      data = None
    count_dict = collections.defaultdict(int)
    return _IndexLookupAccumulator(data, count_dict, per_doc_count_dict)
  def _inverse_document_frequency(self, document_counts, num_documents):
    """Computes the inverse-document-frequency (IDF) component of TFIDF.
    Uses the default weighting scheme described in
    https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
    Args:
      document_counts: An array of the # of documents each token appears in.
      num_documents: An int representing the total number of documents
    Returns:
      An array of "inverse document frequency" weights.
    """
    return np.log(1 + num_documents / (1 + np.array(document_counts)))
| 42.11576 | 80 | 0.675498 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import collections
import json
import operator
import numpy as np
from keras import backend as K
from keras.engine import base_preprocessing_layer
from keras.layers.preprocessing import category_encoding
from keras.layers.preprocessing import table_utils
from keras.utils import layer_utils
from tensorflow.python.ops import lookup_ops
INT = "int"
BINARY = "binary"
COUNT = "count"
TFIDF = "tf-idf"
_VOCAB_NAME = "vocab"
_IDF_WEIGHTS_NAME = "idf_weights"
class IndexLookup(base_preprocessing_layer.CombinerPreprocessingLayer):
def __init__(self,
max_tokens,
num_oov_indices,
mask_token,
oov_token,
vocabulary=None,
invert=False,
output_mode=INT,
sparse=False,
pad_to_max_tokens=False,
**kwargs):
if max_tokens is not None and max_tokens <= 1:
raise ValueError("If set, `max_tokens` must be greater than 1. "
"You passed {}".format(max_tokens))
if num_oov_indices < 0:
raise ValueError("`num_oov_indices` must be greater than or equal to 0. "
"You passed {}".format(num_oov_indices))
# 'output_mode' must be one of (INT, BINARY, COUNT, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, BINARY, COUNT, TFIDF),
layer_name=self.__class__.__name__,
arg_name="output_mode")
if invert and output_mode != INT:
raise ValueError("`output_mode` must be {} when `invert` is true. You "
"passed {}".format(INT, output_mode))
self.invert = invert
self.max_tokens = max_tokens
self.num_oov_indices = num_oov_indices
self.oov_token = oov_token
self.mask_token = mask_token
self.output_mode = output_mode
self.sparse = sparse
self.pad_to_max_tokens = pad_to_max_tokens
self._called = False
self._vocab_size = 0
# We need to keep track our current vocab size outside of our layer weights
# to support a static output shape when `output_mode != INT`. The bincount
# ops do not set shape on their outputs, which means we have to set it
# ourselves. We persist the current vocab size as a hidden part of the
# config when serializing our model.
if "vocab_size" in kwargs:
self._vocab_size = kwargs["vocab_size"]
del kwargs["vocab_size"]
if max_tokens is not None:
available_vocab_size = max_tokens - self._token_start_index()
else:
available_vocab_size = None
super(IndexLookup, self).__init__(
combiner=_IndexLookupCombiner(
vocab_size=available_vocab_size,
mask_value=mask_token,
oov_value=oov_token,
compute_idf=(output_mode == TFIDF)),
**kwargs)
# We need to save the key dtype so that we know if we're expecting int64
if invert:
self._key_dtype = tf.int64
self._value_dtype = self.dtype
self._mask_key = 0
self._mask_value = mask_token
default_value = self.oov_token
oov_indices = None
else:
self._key_dtype = self.dtype
self._value_dtype = tf.int64
self._mask_key = mask_token
self._mask_value = 0 if self.output_mode == INT else tf.int64.max
oov_start = self._oov_start_index()
token_start = self._token_start_index()
if self.num_oov_indices == 0:
default_value = -1 if self.output_mode == INT else tf.int64.max
oov_indices = None
elif self.num_oov_indices == 1:
default_value = oov_start
oov_indices = None
else:
default_value = -1
oov_indices = list(range(oov_start, token_start))
if vocabulary is not None and isinstance(vocabulary,
tf.lookup.TextFileInitializer):
self._table = self._static_table_class()(
vocabulary, default_value=default_value)
self._table_handler = table_utils.TableHandler(
table=self._table,
mask_token=mask_token,
oov_tokens=oov_indices,
use_v1_apis=self._use_v1_apis())
self.max_tokens = (
self._table_handler.table_size() + self.num_oov_indices +
(0 if mask_token is None else 1))
else:
self._table = lookup_ops.MutableHashTable(
key_dtype=self._key_dtype,
value_dtype=self._value_dtype,
default_value=default_value,
name=(self._name + "_index_table"))
self._table_handler = table_utils.TableHandler(
table=self._table,
oov_tokens=oov_indices,
use_v1_apis=self._use_v1_apis())
if vocabulary is not None:
self.set_vocabulary(vocabulary)
if self.output_mode == TFIDF:
if not self.pad_to_max_tokens or max_tokens is None:
initializer = lambda shape, dtype: [0]
else:
initializer = tf.compat.v1.zeros_initializer
idf_shape = (max_tokens,) if self.pad_to_max_tokens else (None,)
self.tf_idf_weights = self._add_state_variable(
name="idf",
shape=tf.TensorShape(idf_shape),
dtype=K.floatx(),
initializer=initializer)
tracked_table = self._add_trackable(self._table, trainable=False)
tracked_table.shape = tf.TensorShape((0,))
def compute_output_shape(self, input_shape):
if self.output_mode == INT:
return input_shape
if self._vocab_size and not self.pad_to_max_tokens:
out_depth = self._vocab_size
else:
out_depth = self.max_tokens
return tf.TensorShape([input_shape[0], out_depth])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = self._value_dtype if self.output_mode == INT else K.floatx()
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
if not reset_state:
raise ValueError("IndexLookup does not support streaming adapts.")
super(IndexLookup, self).adapt(data, reset_state)
def get_vocabulary(self):
if self._vocab_size == 0:
return []
# The MutableHashTable data will not be sorted, so we will create a inverted
# lookup here, and use that to lookup a range of indices [0, vocab_size).
keys, values = self._table_handler.data()
if self.invert:
index_to_token = zip(keys, values)
else:
index_to_token = zip(values, keys)
lookup = collections.defaultdict(lambda: self.oov_token, index_to_token)
return [lookup[x] for x in range(self._vocab_size)]
def vocab_size(self):
return self._vocab_size
def get_config(self):
config = {
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocab_size": self._vocab_size
}
base_config = super(IndexLookup, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self, vocab, idf_weights=None):
if self.output_mode != TFIDF and idf_weights is not None:
raise ValueError("`idf_weights` should only be set if output_mode is "
"TFIDF. output_mode is {}.".format(self.output_mode))
if (self.output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self.pad_to_max_tokens):
raise RuntimeError("When using {} mode and `pad_to_max_tokens` is "
"False, the vocabulary cannot be changed after the "
"layer is called.".format(self.output_mode))
oov_start = self._oov_start_index()
token_start = self._token_start_index()
should_have_mask = (oov_start > 0)
has_mask = should_have_mask and vocab[0] == self.mask_token
should_have_oov = (self.num_oov_indices > 0)
expected_oov = [self.oov_token] * self.num_oov_indices
found_oov = vocab[oov_start:token_start]
has_oov = should_have_oov and found_oov == expected_oov
# If we get a numpy array, then has_oov may end up being a numpy array
# instead of a bool. Fix this by collapsing the variable if it's not bool.
if not isinstance(has_oov, bool):
has_oov = any(has_oov)
if all([should_have_mask, has_mask, should_have_oov]) and not has_oov:
raise ValueError(
"Invalid vocabulary format. The layer was created with "
"`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
"included in the provided vocabulary. The passed vocabulary has the "
"correct mask token `{mask}` at index 0, but does not have the OOV "
"token `{oov}` in indices [{start}:{end}]. Instead, we found "
"`{found}`. Was this vocabulary generated by a layer with "
"incompatible settings?".format(
mask=self.mask_token,
oov=self.oov_token,
start=oov_start,
end=token_start,
found=found_oov))
if all([should_have_oov, has_oov, should_have_mask]) and not has_mask:
raise ValueError(
"Invalid vocabulary format. The layer was created with "
"`mask_token={mask}` and `oov_token={oov}`. These tokens should be "
"included in the provided vocabulary. The passed vocabulary has the "
"correct OOV token `{oov}` at indices [{start}:{end}], but does not "
"have the mask token `{mask}` in index 0. Instead, we found "
"`{found}`. Was this vocabulary generated by a layer with "
"incompatible settings?".format(
mask=self.mask_token,
oov=self.oov_token,
start=oov_start,
end=token_start,
found=vocab[0]))
found_special_tokens = has_oov or has_mask
if found_special_tokens:
tokens = vocab[token_start:]
else:
tokens = vocab
repeated_tokens = table_utils.find_repeated_tokens(tokens)
if repeated_tokens:
raise ValueError("The passed vocabulary has at least one repeated "
"term. Please uniquify your dataset. The repeated terms "
"are {}".format(repeated_tokens))
if self.mask_token in tokens:
raise ValueError("Reserved mask token {} was found in the passed "
"vocabulary at index {}. Please either remove the "
"reserved token from the vocabulary or change the "
"mask token for this layer.".format(
self.mask_token, tokens.index(self.mask_token)))
if self.oov_token in tokens:
raise ValueError("Reserved OOV token {} was found in the passed "
"vocabulary at index {}. Please either remove the "
"reserved token from the vocabulary or change the "
"OOV token for this layer.".format(
self.oov_token, tokens.index(self.oov_token)))
self._vocab_size = token_start + len(tokens)
if self.max_tokens is not None and self._vocab_size > self.max_tokens:
raise ValueError(
"Attempted to set a vocabulary larger than the maximum vocab size. "
"Passed vocab size is {}, max vocab size is {}.".format(
self._vocab_size, self.max_tokens))
if self.output_mode == TFIDF:
if idf_weights is None:
raise ValueError("`idf_weights` must be set if output_mode is TFIDF")
if len(vocab) != len(idf_weights):
raise ValueError("`idf_weights` must be the same length as vocab. "
"len(idf_weights) is {}, len(vocab) is {}".format(
len(vocab), len(idf_weights)))
idf_weights = self._convert_to_ndarray(idf_weights)
if idf_weights.ndim != 1:
raise ValueError(
"TF-IDF data must be a 1-index array, but received {}".format(
type(idf_weights)))
self._table_handler.clear()
indices = np.arange(token_start, len(tokens) + token_start, dtype=np.int64)
if self.invert:
self._table_handler.insert(indices, tokens)
else:
self._table_handler.insert(tokens, indices)
if self.mask_token is not None:
self._table_handler.insert([self._mask_key], [self._mask_value])
if self.output_mode == TFIDF:
# so we will use an average of all idf_weights passed in as a reasonable
# default.
if found_special_tokens:
front_padding = 0
front_padding_value = 0
else:
front_padding = token_start
front_padding_value = np.average(idf_weights)
# If pad_to_max_tokens is true, and max_tokens is greater than our total
# vocab size, we need to pad the back of idf_weights with zeros as well.
back_padding_value = 0
if self.pad_to_max_tokens and self.max_tokens is not None:
back_padding = self.max_tokens - front_padding - len(idf_weights)
else:
back_padding = 0
idf_weights = np.pad(
idf_weights, (front_padding, back_padding),
"constant",
constant_values=(front_padding_value, back_padding_value))
K.set_value(self.tf_idf_weights, idf_weights)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
self.set_vocabulary(
updates[_VOCAB_NAME], idf_weights=updates[_IDF_WEIGHTS_NAME])
  def call(self, inputs):
    """Look up `inputs` and format the result according to `output_mode`.

    Raises:
      ValueError: If no vocabulary has been set (neither via a `vocabulary`
        argument nor via `adapt()`).
    """
    if not self.max_tokens and not self._vocab_size:
      raise ValueError("You must set the layer's vocabulary before calling it. "
                       "Either pass a `vocabulary` argument to the layer, or "
                       "call `layer.adapt(dataset)` with some sample data.")
    self._called = True
    # Table keys are int64; widen int32 inputs so lookups hit the table.
    if self._key_dtype == tf.int64 and inputs.dtype == tf.int32:
      inputs = tf.cast(inputs, tf.int64)
    lookup_result = self._table_handler.lookup(inputs)
    # INT mode: return the raw indices unchanged.
    if self.output_mode == INT:
      return lookup_result
    binary_output = (self.output_mode == BINARY)
    # Output width is the live vocab size unless padding out to max_tokens.
    if self._vocab_size and not self.pad_to_max_tokens:
      out_depth = self._vocab_size
    else:
      out_depth = self.max_tokens
    if self.sparse:
      bincounts = category_encoding.sparse_bincount(lookup_result, out_depth,
                                                    binary_output)
    else:
      bincounts = category_encoding.dense_bincount(lookup_result, out_depth,
                                                   binary_output)
    # TFIDF mode: scale the count/binary vector by the stored idf weights.
    if self.output_mode == TFIDF:
      return tf.multiply(bincounts, self.tf_idf_weights)
    return bincounts
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
  def _use_v1_apis(self):
    # API-selection hook: this implementation always reports False (use V2
    # lookup APIs). NOTE(review): presumably a TF1 compatibility subclass
    # overrides this to return True -- confirm against the subclass.
    return False
  def _static_table_class(self):
    # Lookup-table class used to back the vocabulary. Kept as a method,
    # presumably so a V1 subclass can substitute its own table class -- confirm.
    return tf.lookup.StaticHashTable
def _oov_start_index(self):
return 1 if self.mask_token is not None and self.output_mode == INT else 0
def _token_start_index(self):
return self._oov_start_index() + self.num_oov_indices
class _IndexLookupAccumulator(
collections.namedtuple("Accumulator",
["data", "count_dict", "per_doc_count_dict"])):
pass
class _IndexLookupCombiner(base_preprocessing_layer.Combiner):
  """Combiner for the IndexLookup preprocessing layer.

  Accumulates token frequencies across batches of documents and extracts a
  frequency-sorted vocabulary. When `compute_idf` is True it additionally
  tracks per-document token counts so inverse document frequency weights can
  be produced for TF-IDF output.

  Args:
    vocab_size: If set, the extracted vocabulary is truncated to this many
      tokens (most frequent first). None keeps every token.
    mask_value: Padding token; excluded from the extracted vocabulary.
    oov_value: Out-of-vocabulary token; excluded from the extracted
      vocabulary.
    compute_idf: Whether to track document counts and compute idf weights.
  """

  def __init__(self,
               vocab_size=None,
               mask_value=None,
               oov_value=None,
               compute_idf=False):
    self._vocab_size = vocab_size
    self._mask_value = mask_value
    self._oov_value = oov_value
    self._compute_idf = compute_idf

  def compute(self, values, accumulator=None):
    """Accumulate token (and, optionally, document) counts for a batch."""
    values = base_preprocessing_layer.convert_to_list(
        values, sparse_default_value=self._mask_value)
    if accumulator is None:
      accumulator = self._create_accumulator()
    # Treat a single document as a batch of one.
    if not isinstance(values, list):
      values = [values]
    for document in values:
      # Treat a single token as a one-token document.
      if not isinstance(document, list):
        document = [document]
      if self._compute_idf:
        current_doc_id = accumulator.data["next_doc_id"]
        accumulator.data["next_doc_id"] += 1
      for token in document:
        accumulator.count_dict[token] += 1
        if self._compute_idf:
          # Count each token at most once per document.
          doc_count = accumulator.per_doc_count_dict[token]
          if doc_count["last_doc_id"] != current_doc_id:
            doc_count["count"] += 1
            doc_count["last_doc_id"] = current_doc_id
    return accumulator

  def merge(self, accumulators):
    """Merge several accumulators into the first one and return it."""
    if not accumulators:
      return accumulators
    base_accumulator = accumulators[0]
    for accumulator in accumulators[1:]:
      for token, value in accumulator.count_dict.items():
        base_accumulator.count_dict[token] += value
      if self._compute_idf:
        base_accumulator.data["next_doc_id"] += accumulator.data["next_doc_id"]
        for token, value in accumulator.per_doc_count_dict.items():
          # Newly created per_doc_count_dict entries will have a last_doc_id
          # of -1. This is always less than the next doc id (which are
          # strictly positive), so any future occurrences are guaranteed to
          # be counted.
          base_accumulator.per_doc_count_dict[token]["count"] += value[
              "count"]
    return base_accumulator

  def extract(self, accumulator):
    """Extract a `{_VOCAB_NAME, _IDF_WEIGHTS_NAME}` dict from an accumulator."""
    vocab_counts = accumulator.count_dict
    # Drop special tokens from our vocab.
    if self._mask_value in vocab_counts:
      del vocab_counts[self._mask_value]
    if self._oov_value in vocab_counts:
      del vocab_counts[self._oov_value]
    # Data processed by the accumulator could be tensors, numpy arrays or lists.
    # For tensor string input, values will have been converted into bytes. We
    # need to check the bytes version of special tokens in this case.
    if isinstance(self._mask_value, str):
      mask_value_bytes = tf.compat.as_bytes(self._mask_value)
      if mask_value_bytes in vocab_counts:
        del vocab_counts[mask_value_bytes]
    if isinstance(self._oov_value, str):
      oov_value_bytes = tf.compat.as_bytes(self._oov_value)
      if oov_value_bytes in vocab_counts:
        del vocab_counts[oov_value_bytes]

    # Sort by count (descending), breaking ties by token, then truncate.
    sorted_counts = sorted(
        vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)
    vocab_data = (
        sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)
    vocab = [data[0] for data in vocab_data]

    if self._compute_idf:
      num_documents = accumulator.data["next_doc_id"]
      document_counts = accumulator.per_doc_count_dict
      doc_counts = [document_counts[token]["count"] for token in vocab]
      idf_weights = self._inverse_document_frequency(doc_counts, num_documents)
    else:
      idf_weights = None

    return {_VOCAB_NAME: vocab, _IDF_WEIGHTS_NAME: idf_weights}

  def restore(self, output):
    """Restoring from extracted output is unsupported for this combiner."""
    raise NotImplementedError(
        "IndexLookup does not restore or support streaming updates.")

  def serialize(self, accumulator):
    """Serialize an accumulator to JSON bytes (for cross-process transport)."""
    output_dict = {}
    output_dict["vocab"] = list(accumulator.count_dict.keys())
    output_dict["vocab_counts"] = list(accumulator.count_dict.values())
    if self._compute_idf:
      output_dict["data"] = accumulator.data
      output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
      output_dict["idf_counts"] = [
          counter["count"]
          for counter in accumulator.per_doc_count_dict.values()
      ]
    return tf.compat.as_bytes(json.dumps(output_dict))

  def deserialize(self, encoded_accumulator):
    """Rebuild an accumulator from the output of `serialize`."""
    accumulator_dict = json.loads(tf.compat.as_text(encoded_accumulator))
    accumulator = self._create_accumulator()
    count_dict = dict(
        zip(accumulator_dict["vocab"], accumulator_dict["vocab_counts"]))
    accumulator.count_dict.update(count_dict)
    if self._compute_idf:
      # Bugfix: `accumulator` is a namedtuple, whose fields are read-only
      # descriptors, so `accumulator.data = ...` raises AttributeError.
      # Restore the document-id state by mutating the dict in place instead.
      accumulator.data.update(accumulator_dict["data"])
      create_dict = lambda x: {"count": x, "last_doc_id": -1}
      idf_count_dicts = [
          create_dict(count) for count in accumulator_dict["idf_counts"]
      ]
      idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
      accumulator.per_doc_count_dict.update(idf_dict)
    return accumulator

  def _create_accumulator(self):
    """Create an empty accumulator (with idf state iff `compute_idf`)."""
    if self._compute_idf:
      create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
      per_doc_count_dict = collections.defaultdict(create_default_dict)
      data = {"next_doc_id": 0}
    else:
      per_doc_count_dict = None
      data = None
    count_dict = collections.defaultdict(int)
    return _IndexLookupAccumulator(data, count_dict, per_doc_count_dict)

  def _inverse_document_frequency(self, document_counts, num_documents):
    """Smoothed inverse document frequency: log(1 + N / (1 + df))."""
    return np.log(1 + num_documents / (1 + np.array(document_counts)))
| true | true |
1c33009f274ee36e4bf41a2d5ea5c86909d87893 | 595 | py | Python | test_project/manage.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | 2 | 2018-01-24T20:41:27.000Z | 2019-06-27T13:24:18.000Z | test_project/manage.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | null | null | null | test_project/manage.py | bigsassy/django-facetools | aeedaea81ab0007ee8e96b2f81f1404dc8bddb3c | [
"MIT"
] | null | null | null | import os
import sys
# NOTE(review): `execute_manager` was removed in Django 1.6 and the `imp`
# module is deprecated -- this script targets an old Django/Python stack.
from django.core.management import execute_manager
import imp
# Make the parent directory importable so the project `settings` module
# can be found below.
pwd = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(pwd, '../')))
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
# Import only after the sys.path mutation above; order matters here.
import settings
if __name__ == "__main__":
    execute_manager(settings)
| 29.75 | 216 | 0.739496 | import os
import sys
# NOTE(review): `execute_manager` was removed in Django 1.6 and the `imp`
# module is deprecated -- this script targets an old Django/Python stack.
from django.core.management import execute_manager
import imp
# Make the parent directory importable so the project `settings` module
# can be found below.
pwd = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(pwd, '../')))
try:
    # Probe for settings.py; assumed to live next to this script.
    imp.find_module('settings')
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
# Import only after the sys.path mutation above; order matters here.
import settings
if __name__ == "__main__":
    execute_manager(settings)
| true | true |
1c3301b06f2c47aee6adc2cd1044485633064ee5 | 16,709 | py | Python | nmt/joint/csimplejoint.py | Roxot/AEVNMT | 5ebc6ec76b2c891ab76a5584a15e735145fdc43b | [
"Apache-2.0"
] | 15 | 2018-07-30T13:19:58.000Z | 2019-10-20T13:58:10.000Z | nmt/joint/csimplejoint.py | Roxot/AEVNMT | 5ebc6ec76b2c891ab76a5584a15e735145fdc43b | [
"Apache-2.0"
] | null | null | null | nmt/joint/csimplejoint.py | Roxot/AEVNMT | 5ebc6ec76b2c891ab76a5584a15e735145fdc43b | [
"Apache-2.0"
] | null | null | null | """
:Authors: - Bryan Eikema
"""
import tensorflow as tf
import nmt.utils.misc_utils as utils
from . import DSimpleJointModel
from nmt import model_helper
from .utils import language_model
from nmt.utils.gaussianhelper import GaussianHelper
from nmt.utils.amt_utils import enrich_embeddings_with_positions, self_attention_layer, diagonal_attention_coefficients
class CSimpleJointModel(DSimpleJointModel):
  """Joint NMT model with continuous (Gaussian) source observations.

  Extends DSimpleJointModel by representing source words with pre-trained
  embeddings instead of one-hot vectors: the language model P(x) places
  unit-variance Gaussians over source embeddings, and the inference network
  Q(x|y) predicts Gaussian source embeddings with either a diagonal or a
  full covariance (selected by ``hparams.Qx_covariance``).
  """

  def __init__(self, hparams, mode, iterator, source_vocab_table,
               target_vocab_table, reverse_target_vocab_table=None,
               scope=None, extra_args=None, no_summaries=False):

    super(CSimpleJointModel, self).__init__(hparams=hparams, mode=mode,
        iterator=iterator, source_vocab_table=source_vocab_table,
        target_vocab_table=target_vocab_table,
        reverse_target_vocab_table=reverse_target_vocab_table,
        scope=scope, extra_args=extra_args, no_summaries=True)

    # Set model specific training summaries.
    if self.mode == tf.contrib.learn.ModeKeys.TRAIN and not no_summaries:
      self.bi_summary = tf.summary.merge([
          self._base_summaries,
          self._supervised_tm_accuracy_summary,
          tf.summary.scalar("supervised_ELBO", self._elbo),
          tf.summary.scalar("supervised_tm_loss", self._tm_loss),
          tf.summary.scalar("supervised_lm_loss", self._lm_loss)])
      self.mono_summary = tf.summary.merge([
          self._base_summaries,
          tf.summary.scalar("semi_supervised_tm_accuracy", self._tm_accuracy),
          tf.summary.scalar("semi_supervised_ELBO", self._elbo),
          tf.summary.scalar("semi_supervised_tm_loss", self._tm_loss),
          tf.summary.scalar("semi_supervised_lm_loss", self._lm_loss),
          tf.summary.scalar("semi_supervised_entropy", self._entropy)])

  # Overrides DSimpleJointModel._source_embedding
  # We use pre-trained embeddings, thus don't do an embedding lookup.
  def _source_embedding(self, source):
    return source

  # Overrides DSimpleJointModel._parse_iterator
  # Returns word embeddings instead of one hot vectors.
  def _parse_iterator(self, iterator, hparams, scope=None):
    """Wire the data iterator into model inputs (embedded source side)."""
    dtype = tf.float32
    with tf.variable_scope(scope or "dynamic_seq2seq", dtype=dtype):
      self.src_embed_size = self.embedding_encoder.shape[1]
      self.initializer = iterator.initializer
      self.mono_initializer = iterator.mono_initializer
      self.mono_batch = iterator.mono_batch

      # Change the data depending on what type of batch we're training on.
      self.target_input, self.target_output, self.target_sequence_length = tf.cond(
          self.mono_batch,
          true_fn=lambda: (iterator.mono_text_input, iterator.mono_text_output,
                           iterator.mono_text_length),
          false_fn=lambda: (iterator.target_input, iterator.target_output,
                            iterator.target_sequence_length))

      if self.mode != tf.contrib.learn.ModeKeys.INFER:
        self.batch_size = tf.size(self.target_sequence_length)
      else:
        self.batch_size = tf.size(iterator.source_sequence_length)

      # For monolingual batches the source is inferred by Q(x|y); otherwise
      # look up the pre-trained embeddings of the observed source sentence.
      self.source, self.source_output, self.source_sequence_length = tf.cond(
          self.mono_batch,
          true_fn=lambda: self._infer_source(iterator, hparams, embeddings=True),
          false_fn=lambda: (tf.nn.embedding_lookup(self.embedding_encoder,
                                                   iterator.source),
                            tf.nn.embedding_lookup(self.embedding_encoder,
                                                   iterator.source_output),
                            iterator.source_sequence_length))

  # Builds a Gaussian language model with fixed diagonal unit variance.
  # If z_sample is given it will be used to initialize the RNNLM.
  def _build_language_model(self, hparams, z_sample=None):
    """Return a MultivariateNormalDiag over source embeddings (unit stddev)."""
    source = self.source
    if self.time_major:
      source = self._transpose_time_major(source)

    # Use embeddings as inputs.
    embeddings = self._source_embedding(source)

    # Run the RNNLM.
    lm_outputs = language_model(embeddings, self.source_sequence_length,
        hparams, self.mode, self.single_cell_fn, self.time_major,
        self.batch_size, z_sample=z_sample)

    # Put the RNN output through a projection layer to obtain a mean for the
    # Gaussians.
    mean = tf.layers.dense(
        lm_outputs.rnn_output,
        self.src_embed_size,
        name="output_projection")
    stddev = tf.ones_like(mean)

    return tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
        scale_diag=stddev)

  # Overrides DSimpleJointModel.build_graph
  def build_graph(self, hparams, scope=None):
    """Build LM + encoder + decoder and (in train/eval) the joint loss."""
    utils.print_out("# creating %s graph ..." % self.mode)
    dtype = tf.float32

    with tf.variable_scope(scope or "dynamic_seq2seq", dtype=dtype):
      with tf.variable_scope("generative_model", dtype=dtype):

        # P(x_1^m) language model
        gauss_observations = self._build_language_model(hparams)

        # P(y_1^n|x_1^m) encoder
        encoder_outputs, encoder_state = self._build_encoder(hparams)

        # P(y_1^n|x_1^m) decoder
        tm_logits, sample_id, final_context_state = self._build_decoder(
            encoder_outputs, encoder_state, hparams)

      # Loss
      if self.mode != tf.contrib.learn.ModeKeys.INFER:
        with tf.device(model_helper.get_device_str(self.num_encoder_layers - 1,
                                                   self.num_gpus)):
          loss, components = self._compute_loss(tm_logits, gauss_observations)
      else:
        loss = None

      # Save for summaries.
      if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
        self._tm_loss = components[0]
        self._lm_loss = components[1]
        self._entropy = components[2]
        self._elbo = -loss

    return tm_logits, loss, final_context_state, sample_id

  # Overrides DSimpleJointModel._diagonal_decoder
  # Predicts Gaussian X_i | x_<i, y_1^n.
  def _diagonal_decoder(self, encoder_outputs, target_length,
                        predicted_source_length, hparams):
    """Infer Q(x|y) using fixed diagonal attention over the target encoder."""

    # Tile encoder_outputs from [B x T_i x d] to [B x T_o x T_i x d]
    encoder_outputs = tf.expand_dims(encoder_outputs, axis=1)
    encoder_outputs = tf.tile(encoder_outputs,
        multiples=[1, tf.reduce_max(predicted_source_length), 1, 1])

    # Create source and target sequence masks.
    y_mask = tf.sequence_mask(target_length, dtype=tf.float32)
    x_mask = tf.sequence_mask(predicted_source_length, dtype=tf.float32)

    # Compute fixed decoder coefficients based only on the source and
    # target sentence length.
    attention_coefficients = diagonal_attention_coefficients(y_mask, x_mask,
        target_length, predicted_source_length)
    attention_coefficients = tf.expand_dims(attention_coefficients, axis=-1)
    attention_output = tf.reduce_sum(encoder_outputs * attention_coefficients,
        axis=2)

    # Use the attention output to predict a mean and a diagonal covariance
    # for X_i.
    mean = tf.layers.dense(attention_output, self.src_embed_size)
    if hparams.Qx_covariance == "diagonal":
      # Bugfix: apply softplus so the predicted stddev is strictly positive,
      # consistent with the RNN-based Q(x|y) decoders in this class.
      stddev = tf.layers.dense(attention_output, self.src_embed_size,
          activation=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
          scale_diag=stddev)
    elif hparams.Qx_covariance == "full":
      # Predict the cholesky factor.
      cov_matrix_values = tf.layers.dense(attention_output,
          self.src_embed_size * self.src_embed_size)
      cov_matrix = tf.reshape(cov_matrix_values,
          [self.batch_size, tf.reduce_max(predicted_source_length),
           self.src_embed_size, self.src_embed_size])
      cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
          transform=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
          loc=mean, scale_tril=cholesky)
    else:
      raise ValueError("Unknown value for Qx_covariance: %s" % \
          hparams.Qx_covariance)

    return self.Qx.sample()

  # Overrides dsimplejoint._deterministic_rnn_decoder_with_attention
  def _deterministic_rnn_decoder_with_attention(self, encoder_outputs, final_state,
      target_length, predicted_source_length, hparams):
    """Infer Q(x|y) with a GRU decoder attending over the target encodings."""
    max_source_length = tf.reduce_max(predicted_source_length)
    encoder_output = tf.tile(tf.expand_dims(final_state, 1),
        [1, max_source_length, 1])
    inputs = enrich_embeddings_with_positions(encoder_output,
        hparams.num_units, "positional_embeddings")

    if self.time_major:
      inputs = self._transpose_time_major(inputs)

    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
        hparams.num_units, encoder_outputs,
        memory_sequence_length=target_length)
    cell = tf.contrib.rnn.GRUCell(hparams.num_units)
    cell = tf.contrib.seq2seq.AttentionWrapper(
        cell,
        attention_mechanism,
        attention_layer_size=hparams.num_units,
        alignment_history=False,
        output_attention=False,
        name="attention")
    decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
        sequence_length=predicted_source_length,
        time_major=self.time_major,
        dtype=inputs.dtype)

    # Return batch major.
    if self.time_major:
      decoder_outputs = self._transpose_time_major(decoder_outputs)

    mean = tf.layers.dense(decoder_outputs, self.src_embed_size)
    if hparams.Qx_covariance == "diagonal":
      # Bugfix: the original referenced the non-existent `tf.nn.sotftplus`.
      stddev = tf.layers.dense(decoder_outputs, self.src_embed_size,
          activation=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
          scale_diag=stddev)
    elif hparams.Qx_covariance == "full":
      # Predict the cholesky factor.
      cov_matrix_values = tf.layers.dense(decoder_outputs,
          self.src_embed_size * self.src_embed_size)
      cov_matrix = tf.reshape(cov_matrix_values,
          [self.batch_size, tf.reduce_max(predicted_source_length),
           self.src_embed_size, self.src_embed_size])
      cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
          transform=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
          loc=mean, scale_tril=cholesky)
    else:
      raise ValueError("Unknown value for Qx_covariance: %s" % \
          hparams.Qx_covariance)

    return self.Qx.sample()

  # Overrides dsimplejoint._deterministic_rnn_decoder
  def _deterministic_rnn_decoder(self, encoder_outputs, final_state,
      target_length, predicted_source_length, hparams):
    """Infer Q(x|y) with a GRU decoder driven by the final encoder state."""
    max_source_length = tf.reduce_max(predicted_source_length)
    inputs = tf.tile(tf.expand_dims(final_state, 1),
        [1, max_source_length, 1])
    inputs = enrich_embeddings_with_positions(inputs,
        hparams.num_units, "positional_embeddings")

    if self.time_major:
      inputs = self._transpose_time_major(inputs)

    cell = tf.contrib.rnn.GRUCell(hparams.num_units)
    decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
        sequence_length=predicted_source_length,
        time_major=self.time_major,
        dtype=inputs.dtype)

    # Return batch major.
    if self.time_major:
      decoder_outputs = self._transpose_time_major(decoder_outputs)

    with tf.variable_scope("mean_inference_net"):
      mean = tf.layers.dense(decoder_outputs, self.src_embed_size)
    if hparams.Qx_covariance == "diagonal":
      with tf.variable_scope("stddev_inference_net"):
        stddev = tf.layers.dense(decoder_outputs, self.src_embed_size,
            activation=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
          scale_diag=stddev)
    elif hparams.Qx_covariance == "full":
      # Predict the cholesky factor.
      cov_matrix_values = tf.layers.dense(decoder_outputs,
          self.src_embed_size * self.src_embed_size)
      cov_matrix = tf.reshape(cov_matrix_values,
          [self.batch_size, tf.reduce_max(predicted_source_length),
           self.src_embed_size, self.src_embed_size])
      cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
          transform=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
          loc=mean, scale_tril=cholesky)
    else:
      raise ValueError("Unknown value for Qx_covariance: %s" % \
          hparams.Qx_covariance)

    return self.Qx.sample()

  # Overrides DSimpleJointModel._rnn_decoder
  # Models X_i | y_1^n, x_<i as Gaussian variables.
  def _rnn_decoder(self, encoder_outputs, encoder_state, target_length,
      predicted_source_length, hparams):
    """Infer Q(x|y) autoregressively with a GaussianHelper decoder."""
    scope = tf.get_variable_scope()
    if self.time_major:
      encoder_outputs = self._transpose_time_major(encoder_outputs)

    # Create an identical cell to the forward NMT decoder, but disable
    # inference mode.
    cell, decoder_init_state = self._build_decoder_cell(hparams,
        encoder_outputs, encoder_state, target_length, no_infer=True)

    # Create the initial inputs for the decoder. Use the generative embedding
    # matrix but stop the gradients.
    src_sos_id = tf.cast(self.src_vocab_table.lookup(
        tf.constant(hparams.sos)), tf.int32)
    start_tokens = tf.fill([self.batch_size], src_sos_id)
    start_tokens = tf.nn.embedding_lookup(self.embedding_encoder, start_tokens)
    start_tokens = tf.stop_gradient(start_tokens)

    # Create the Gaussian helper to generate Gaussian samples.
    helper = GaussianHelper(
        start_tokens=start_tokens,
        decode_lengths=predicted_source_length,
        full_covariance=(hparams.Qx_covariance == "full"))
    utils.print_out("  creating GaussianHelper")

    # Create the decoder. The projection layer emits the Gaussian parameters:
    # mean plus either a diagonal stddev or a full covariance factor.
    if hparams.Qx_covariance == "diagonal":
      projection_layer_size = self.src_embed_size * 2
    elif hparams.Qx_covariance == "full":
      projection_layer_size = self.src_embed_size + \
          self.src_embed_size * self.src_embed_size
    else:
      raise ValueError("Unknown value for Qx_covariance: %s" % \
          hparams.Qx_covariance)
    projection_layer = tf.layers.Dense(projection_layer_size)
    decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper,
        decoder_init_state, output_layer=projection_layer)

    # Decode the Concrete source sentence.
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder,
        output_time_major=self.time_major,
        maximum_iterations=tf.reduce_max(predicted_source_length),
        swap_memory=True,
        scope=scope)
    inferred_source = outputs.sample_id
    mean = outputs.rnn_output[:, :, :self.src_embed_size]
    stddev = outputs.rnn_output[:, :, self.src_embed_size:]

    # Return in batch major.
    if self.time_major:
      inferred_source = self._transpose_time_major(inferred_source)
      mean = self._transpose_time_major(mean)
      stddev = self._transpose_time_major(stddev)

    if hparams.Qx_covariance == "diagonal":
      self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
          scale_diag=stddev)
    else:
      # Full covariance: the tail of the projection holds the (flattened)
      # covariance factor.
      covariance = tf.reshape(stddev, [self.batch_size,
          tf.reduce_max(predicted_source_length), self.src_embed_size,
          self.src_embed_size])
      cholesky = tf.contrib.distributions.matrix_diag_transform(covariance,
          transform=tf.nn.softplus)
      self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
          loc=mean, scale_tril=cholesky)

    return inferred_source

  # Gives the negative log-likelihood of given observations for a gaussian
  # variable.
  def _gaussian_nll(self, gauss_var, observations, observation_length):
    """Masked NLL of `observations` under `gauss_var`, averaged over batch."""
    if self.time_major: observations = self._transpose_time_major(observations)
    log_prob = gauss_var.log_prob(observations)
    max_source_time = self.get_max_time(observations)
    mask = tf.sequence_mask(observation_length, max_source_time,
        dtype=log_prob.dtype)
    if self.time_major: mask = tf.transpose(mask)
    nll = -tf.reduce_sum(log_prob * mask) / tf.to_float(self.batch_size)
    return nll

  # Overrides DSimpleJointModel._compute_loss
  def _compute_loss(self, tm_logits, gauss_observations):
    """Return the joint loss (neg. ELBO) and its (tm, lm, entropy) parts."""

    # - log P(y_1^n)
    tm_loss = self._compute_categorical_loss(tm_logits,
        self.target_output, self.target_sequence_length)

    # - log p(x_1^m)
    lm_loss = self._gaussian_nll(gauss_observations, self.source_output,
        self.source_sequence_length)

    # H(X|y_1^n) -- keep in mind self.Qx is defined in batch major, as are all
    # data streams. Only contributes for monolingual (inferred-source) batches.
    entropy = tf.cond(self.mono_batch,
        true_fn=lambda: tf.reduce_mean(tf.reduce_sum(self.Qx.entropy(), axis=1)),
        false_fn=lambda: tf.constant(0.))

    return tm_loss + lm_loss - entropy, (tm_loss, lm_loss, entropy)
| 41.155172 | 119 | 0.707283 |
import tensorflow as tf
import nmt.utils.misc_utils as utils
from . import DSimpleJointModel
from nmt import model_helper
from .utils import language_model
from nmt.utils.gaussianhelper import GaussianHelper
from nmt.utils.amt_utils import enrich_embeddings_with_positions, self_attention_layer, diagonal_attention_coefficients
class CSimpleJointModel(DSimpleJointModel):
def __init__(self, hparams, mode, iterator, source_vocab_table,
target_vocab_table, reverse_target_vocab_table=None,
scope=None, extra_args=None, no_summaries=False):
super(CSimpleJointModel, self).__init__(hparams=hparams, mode=mode,
iterator=iterator, source_vocab_table=source_vocab_table,
target_vocab_table=target_vocab_table,
reverse_target_vocab_table=reverse_target_vocab_table,
scope=scope, extra_args=extra_args, no_summaries=True)
if self.mode == tf.contrib.learn.ModeKeys.TRAIN and not no_summaries:
self.bi_summary = tf.summary.merge([
self._base_summaries,
self._supervised_tm_accuracy_summary,
tf.summary.scalar("supervised_ELBO", self._elbo),
tf.summary.scalar("supervised_tm_loss", self._tm_loss),
tf.summary.scalar("supervised_lm_loss", self._lm_loss)])
self.mono_summary = tf.summary.merge([
self._base_summaries,
tf.summary.scalar("semi_supervised_tm_accuracy", self._tm_accuracy),
tf.summary.scalar("semi_supervised_ELBO", self._elbo),
tf.summary.scalar("semi_supervised_tm_loss", self._tm_loss),
tf.summary.scalar("semi_supervised_lm_loss", self._lm_loss),
tf.summary.scalar("semi_supervised_entropy", self._entropy)])
def _source_embedding(self, source):
return source
# Overrides DSimpleJointModel._parse_iterator
# Returns word embeddings instead of one hot vectors.
def _parse_iterator(self, iterator, hparams, scope=None):
dtype = tf.float32
with tf.variable_scope(scope or "dynamic_seq2seq", dtype=dtype):
self.src_embed_size = self.embedding_encoder.shape[1]
self.initializer = iterator.initializer
self.mono_initializer = iterator.mono_initializer
self.mono_batch = iterator.mono_batch
# Change the data depending on what type of batch we're training on.
self.target_input, self.target_output, self.target_sequence_length = tf.cond(
self.mono_batch,
true_fn=lambda: (iterator.mono_text_input, iterator.mono_text_output,
iterator.mono_text_length),
false_fn=lambda: (iterator.target_input, iterator.target_output,
iterator.target_sequence_length))
if self.mode != tf.contrib.learn.ModeKeys.INFER:
self.batch_size = tf.size(self.target_sequence_length)
else:
self.batch_size = tf.size(iterator.source_sequence_length)
self.source, self.source_output, self.source_sequence_length = tf.cond(
self.mono_batch,
true_fn=lambda: self._infer_source(iterator, hparams, embeddings=True),
false_fn=lambda: (tf.nn.embedding_lookup(self.embedding_encoder,
iterator.source),
tf.nn.embedding_lookup(self.embedding_encoder,
iterator.source_output),
iterator.source_sequence_length))
def _build_language_model(self, hparams, z_sample=None):
source = self.source
if self.time_major:
source = self._transpose_time_major(source)
embeddings = self._source_embedding(source)
lm_outputs = language_model(embeddings, self.source_sequence_length,
hparams, self.mode, self.single_cell_fn, self.time_major,
self.batch_size, z_sample=z_sample)
mean = tf.layers.dense(
lm_outputs.rnn_output,
self.src_embed_size,
name="output_projection")
stddev = tf.ones_like(mean)
return tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=stddev)
def build_graph(self, hparams, scope=None):
utils.print_out("# creating %s graph ..." % self.mode)
dtype = tf.float32
with tf.variable_scope(scope or "dynamic_seq2seq", dtype=dtype):
with tf.variable_scope("generative_model", dtype=dtype):
gauss_observations = self._build_language_model(hparams)
encoder_outputs, encoder_state = self._build_encoder(hparams)
tm_logits, sample_id, final_context_state = self._build_decoder(
encoder_outputs, encoder_state, hparams)
if self.mode != tf.contrib.learn.ModeKeys.INFER:
with tf.device(model_helper.get_device_str(self.num_encoder_layers - 1,
self.num_gpus)):
loss, components = self._compute_loss(tm_logits, gauss_observations)
else:
loss = None
if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
self._tm_loss = components[0]
self._lm_loss = components[1]
self._entropy = components[2]
self._elbo = -loss
return tm_logits, loss, final_context_state, sample_id
def _diagonal_decoder(self, encoder_outputs, target_length,
predicted_source_length, hparams):
encoder_outputs = tf.expand_dims(encoder_outputs, axis=1)
encoder_outputs = tf.tile(encoder_outputs,
multiples=[1, tf.reduce_max(predicted_source_length), 1, 1])
y_mask = tf.sequence_mask(target_length, dtype=tf.float32)
x_mask = tf.sequence_mask(predicted_source_length,
dtype=tf.float32)
attention_coefficients = diagonal_attention_coefficients(y_mask, x_mask,
target_length, predicted_source_length)
attention_coefficients = tf.expand_dims(attention_coefficients, axis=-1)
attention_output = tf.reduce_sum(encoder_outputs * attention_coefficients,
axis=2)
mean = tf.layers.dense(attention_output, self.src_embed_size)
if hparams.Qx_covariance == "diagonal":
stddev = tf.layers.dense(attention_output, self.src_embed_size)
self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=stddev)
elif hparams.Qx_covariance == "full":
cov_matrix_values = tf.layers.dense(attention_output,
self.src_embed_size * self.src_embed_size)
cov_matrix = tf.reshape(cov_matrix_values,
[self.batch_size, tf.reduce_max(predicted_source_length),
self.src_embed_size, self.src_embed_size])
cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
transform=tf.nn.softplus)
self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
loc=mean, scale_tril=cholesky)
else:
raise ValueError("Unknown value for Qx_covariance: %s" % \
hparams.Qx_covariance)
return self.Qx.sample()
def _deterministic_rnn_decoder_with_attention(self, encoder_outputs, final_state,
target_length, predicted_source_length, hparams):
max_source_length = tf.reduce_max(predicted_source_length)
encoder_output = tf.tile(tf.expand_dims(final_state, 1),
[1, max_source_length, 1])
inputs = enrich_embeddings_with_positions(encoder_output,
hparams.num_units, "positional_embeddings")
if self.time_major:
inputs = self._transpose_time_major(inputs)
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
hparams.num_units, encoder_outputs,
memory_sequence_length=target_length)
cell = tf.contrib.rnn.GRUCell(hparams.num_units)
cell = tf.contrib.seq2seq.AttentionWrapper(
cell,
attention_mechanism,
attention_layer_size=hparams.num_units,
alignment_history=False,
output_attention=False,
name="attention")
decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
sequence_length=predicted_source_length,
time_major=self.time_major,
dtype=inputs.dtype)
if self.time_major:
decoder_outputs = self._transpose_time_major(decoder_outputs)
mean = tf.layers.dense(decoder_outputs, self.src_embed_size)
if hparams.Qx_covariance == "diagonal":
stddev = tf.layers.dense(decoder_outputs, self.src_embed_size, activation=tf.nn.sotftplus)
self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=stddev)
elif hparams.Qx_covariance == "full":
cov_matrix_values = tf.layers.dense(decoder_outputs,
self.src_embed_size * self.src_embed_size)
cov_matrix = tf.reshape(cov_matrix_values,
[self.batch_size, tf.reduce_max(predicted_source_length),
self.src_embed_size, self.src_embed_size])
cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
transform=tf.nn.softplus)
self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
loc=mean, scale_tril=cholesky)
else:
raise ValueError("Unknown value for Qx_covariance: %s" % \
hparams.Qx_covariance)
return self.Qx.sample()
def _deterministic_rnn_decoder(self, encoder_outputs, final_state,
target_length, predicted_source_length, hparams):
max_source_length = tf.reduce_max(predicted_source_length)
inputs = tf.tile(tf.expand_dims(final_state, 1),
[1, max_source_length, 1])
inputs = enrich_embeddings_with_positions(inputs,
hparams.num_units, "positional_embeddings")
if self.time_major:
inputs = self._transpose_time_major(inputs)
cell = tf.contrib.rnn.GRUCell(hparams.num_units)
decoder_outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
sequence_length=predicted_source_length,
time_major=self.time_major,
dtype=inputs.dtype)
if self.time_major:
decoder_outputs = self._transpose_time_major(decoder_outputs)
with tf.variable_scope("mean_inference_net"):
mean = tf.layers.dense(decoder_outputs, self.src_embed_size)
if hparams.Qx_covariance == "diagonal":
with tf.variable_scope("stddev_inference_net"):
stddev = tf.layers.dense(decoder_outputs, self.src_embed_size, activation=tf.nn.softplus)
self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
scale_diag=stddev)
elif hparams.Qx_covariance == "full":
cov_matrix_values = tf.layers.dense(decoder_outputs,
self.src_embed_size * self.src_embed_size)
cov_matrix = tf.reshape(cov_matrix_values,
[self.batch_size, tf.reduce_max(predicted_source_length),
self.src_embed_size, self.src_embed_size])
cholesky = tf.contrib.distributions.matrix_diag_transform(cov_matrix,
transform=tf.nn.softplus)
self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
loc=mean, scale_tril=cholesky)
else:
raise ValueError("Unknown value for Qx_covariance: %s" % \
hparams.Qx_covariance)
return self.Qx.sample()
def _rnn_decoder(self, encoder_outputs, encoder_state, target_length,
        predicted_source_length, hparams):
    """Autoregressive RNN decoder that infers source embeddings.

    Decodes with a GaussianHelper starting from the <sos> embedding; at each
    step the projection layer emits the mean plus either a diagonal stddev
    or a flattened full covariance (depending on hparams.Qx_covariance).
    Stores the resulting distribution in ``self.Qx`` and returns the sampled
    inferred source (``outputs.sample_id``).
    """
    scope = tf.get_variable_scope()
    if self.time_major:
        encoder_outputs = self._transpose_time_major(encoder_outputs)
    cell, decoder_init_state = self._build_decoder_cell(hparams,
        encoder_outputs, encoder_state, target_length, no_infer=True)
    # Start every batch element from the <sos> token's embedding; gradients
    # are stopped so decoding cannot update the embedding through the start.
    src_sos_id = tf.cast(self.src_vocab_table.lookup(
        tf.constant(hparams.sos)), tf.int32)
    start_tokens = tf.fill([self.batch_size], src_sos_id)
    start_tokens = tf.nn.embedding_lookup(self.embedding_encoder, start_tokens)
    start_tokens = tf.stop_gradient(start_tokens)
    helper = GaussianHelper(
        start_tokens=start_tokens,
        decode_lengths=predicted_source_length,
        full_covariance=(hparams.Qx_covariance == "full"))
    utils.print_out(" creating GaussianHelper")
    # Projection emits mean (+ stddev or flattened covariance) per step.
    if hparams.Qx_covariance == "diagonal":
        projection_layer_size = self.src_embed_size * 2
    elif hparams.Qx_covariance == "full":
        projection_layer_size = self.src_embed_size + \
            self.src_embed_size * self.src_embed_size
    else:
        raise ValueError("Unknown value for Qx_covariance: %s" % \
            hparams.Qx_covariance)
    projection_layer = tf.layers.Dense(projection_layer_size)
    decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper,
        decoder_init_state, output_layer=projection_layer)
    outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder,
        output_time_major=self.time_major,
        maximum_iterations=tf.reduce_max(predicted_source_length),
        swap_memory=True,
        scope=scope)
    inferred_source = outputs.sample_id
    # Split the projection output back into its mean / scale parts.
    mean = outputs.rnn_output[:, :, :self.src_embed_size]
    stddev = outputs.rnn_output[:, :, self.src_embed_size:]
    if self.time_major:
        inferred_source = self._transpose_time_major(inferred_source)
        mean = self._transpose_time_major(mean)
        stddev = self._transpose_time_major(stddev)
    if hparams.Qx_covariance == "diagonal":
        self.Qx = tf.contrib.distributions.MultivariateNormalDiag(loc=mean,
            scale_diag=stddev)
    else:
        # "full": reshape the flattened values into square matrices and take
        # a softplus-diagonal transform to get a valid Cholesky factor.
        covariance = tf.reshape(stddev, [self.batch_size,
            tf.reduce_max(predicted_source_length), self.src_embed_size,
            self.src_embed_size])
        cholesky = tf.contrib.distributions.matrix_diag_transform(covariance,
            transform=tf.nn.softplus)
        self.Qx = tf.contrib.distributions.MultivariateNormalTriL(
            loc=mean, scale_tril=cholesky)
    return inferred_source
def _gaussian_nll(self, gauss_var, observations, observation_length):
    """Masked Gaussian negative log-likelihood, normalized by batch size.

    Padding positions beyond ``observation_length`` are zeroed out of the
    per-step log-probabilities before summing.
    """
    if self.time_major: observations = self._transpose_time_major(observations)
    log_prob = gauss_var.log_prob(observations)
    max_source_time = self.get_max_time(observations)
    mask = tf.sequence_mask(observation_length, max_source_time,
        dtype=log_prob.dtype)
    if self.time_major: mask = tf.transpose(mask)
    # Normalize by batch size (not sequence length), matching the TM loss.
    nll = -tf.reduce_sum(log_prob * mask) / tf.to_float(self.batch_size)
    return nll
def _compute_loss(self, tm_logits, gauss_observations):
    """Joint objective: TM cross-entropy + Gaussian LM NLL - Qx entropy.

    The entropy bonus only applies on monolingual batches
    (``self.mono_batch`` is a boolean tensor); otherwise it contributes 0.
    Returns (total_loss, (tm_loss, lm_loss, entropy)) so the components can
    be reported separately.
    """
    tm_loss = self._compute_categorical_loss(tm_logits,
        self.target_output, self.target_sequence_length)
    lm_loss = self._gaussian_nll(gauss_observations, self.source_output,
        self.source_sequence_length)
    # Entropy is summed over source positions, then averaged over the batch.
    entropy = tf.cond(self.mono_batch,
        true_fn=lambda: tf.reduce_mean(tf.reduce_sum(self.Qx.entropy(), axis=1)),
        false_fn=lambda: tf.constant(0.))
    return tm_loss + lm_loss - entropy, (tm_loss, lm_loss, entropy)
| true | true |
1c33045044c41d07abe899c546597c89767ac0a5 | 3,457 | py | Python | services/models.py | Jordzman/explorer | d0b796014320761cd751429a9dc6ad48fe90ed96 | [
"Apache-2.0"
] | 2 | 2016-08-07T02:59:23.000Z | 2016-08-21T23:48:03.000Z | services/models.py | Trevorulliac/explorer | d2f836afd0b827041b1c75a8dd056fdb2beb2e1d | [
"Apache-2.0"
] | 29 | 2020-05-01T08:05:16.000Z | 2021-06-25T15:36:13.000Z | services/models.py | Trevorulliac/explorer | d2f836afd0b827041b1c75a8dd056fdb2beb2e1d | [
"Apache-2.0"
] | 1 | 2016-02-09T21:47:34.000Z | 2016-02-09T21:47:34.000Z | from django.db import models
from blockexplorer.raven import client
from jsonfield import JSONField
from utils import get_client_ip, uri_to_url, is_good_status_code, get_user_agent
import json
import requests
class APICall(models.Model):
    """
    To keep track of all our external API calls and aid in debugging as well.
    """

    # NOTE(review): no choices are defined here; api_name currently accepts
    # any 3-char value — confirm whether choices are meant to be added.
    API_NAME_CHOICES = ()

    # Main fields
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    api_name = models.CharField(choices=API_NAME_CHOICES, max_length=3, null=False, blank=False, db_index=True)
    # Request made and response received (body truncated to 100k chars).
    url_hit = models.URLField(max_length=1024, blank=False, null=False, db_index=True)
    response_code = models.PositiveSmallIntegerField(blank=False, null=False, db_index=True)
    post_params = JSONField(blank=True, null=True)
    headers = models.CharField(max_length=2048, null=True, blank=True)
    api_results = models.CharField(max_length=100000, blank=True, null=True)

    def __str__(self):
        return '%s from %s' % (self.id, self.api_name)
class WebHook(models.Model):
    """
    To keep track of all our webhooks and aid in debugging as well.
    """

    # api_name choices
    BLOCKCYPHER_ADDRESS_NOTIFICATION = 'BAN'
    API_NAME_CHOICES = (
        (BLOCKCYPHER_ADDRESS_NOTIFICATION, 'blockcypher address notification'),
    )

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)

    # IP and UA of machine hitting coinsafe
    ip_address = models.IPAddressField(null=False, blank=False, db_index=True)
    user_agent = models.CharField(max_length=1024, blank=True, db_index=True)
    api_name = models.CharField(choices=API_NAME_CHOICES, max_length=3, null=False, blank=False, db_index=True)
    hostname = models.CharField(max_length=512, blank=False, null=False, db_index=True)
    request_path = models.CharField(max_length=2048, blank=False, null=False, db_index=True)
    uses_https = models.BooleanField(db_index=True, default=False)
    succeeded = models.BooleanField(db_index=True, default=False)
    data_from_get = JSONField(blank=True, null=True)
    data_from_post = JSONField(blank=True, null=True)

    # optional FKs
    address_subscription = models.ForeignKey('addresses.AddressSubscription', null=True, blank=True)

    def __str__(self):
        return '%s from %s' % (self.id, self.api_name)

    @classmethod
    def log_webhook(cls, request, api_name):
        """Persist an incoming webhook request (metadata plus GET/POST data).

        A malformed JSON body is reported to Sentry and stored as None
        instead of aborting the request.
        """
        try:
            data_from_post = json.loads(request.body.decode())
        except Exception:
            client.captureException()
            data_from_post = None
        return cls.objects.create(
            ip_address=get_client_ip(request),
            user_agent=get_user_agent(request),
            api_name=api_name,
            hostname=request.get_host(),
            request_path=request.path,
            uses_https=request.is_secure(),
            data_from_get=request.GET,
            data_from_post=data_from_post,
        )

    def retry_webhook(self):
        """Debug method to be called at the command line only.

        Replays the stored webhook against the current host. Returns True on
        a good status code, the response body text on failure, or None when
        no payload was recorded.
        """
        url_to_hit = uri_to_url(self.request_path)
        if self.data_from_get:
            r = requests.get(url_to_hit, params=self.data_from_get)
        elif self.data_from_post:
            # Bug fix: the JSON payload belongs in the request body (data=),
            # not in the URL query string (params=).
            r = requests.post(url_to_hit, data=json.dumps(self.data_from_post))
        else:
            # Bug fix: previously `r` was unbound here, raising NameError.
            return None
        if is_good_status_code(r.status_code):
            return True
        else:
            return r.text
| 37.576087 | 111 | 0.689037 | from django.db import models
from blockexplorer.raven import client
from jsonfield import JSONField
from utils import get_client_ip, uri_to_url, is_good_status_code, get_user_agent
import json
import requests
class APICall(models.Model):
    """Audit record of one outbound call to an external API (for debugging)."""

    # No choices defined; api_name accepts any 3-char value.
    API_NAME_CHOICES = ()

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    api_name = models.CharField(choices=API_NAME_CHOICES, max_length=3, null=False, blank=False, db_index=True)
    # Request made and response received (body truncated to 100k chars).
    url_hit = models.URLField(max_length=1024, blank=False, null=False, db_index=True)
    response_code = models.PositiveSmallIntegerField(blank=False, null=False, db_index=True)
    post_params = JSONField(blank=True, null=True)
    headers = models.CharField(max_length=2048, null=True, blank=True)
    api_results = models.CharField(max_length=100000, blank=True, null=True)

    def __str__(self):
        return '%s from %s' % (self.id, self.api_name)
class WebHook(models.Model):
    """Audit record of one inbound webhook request (for debugging/replay)."""

    # api_name choices
    BLOCKCYPHER_ADDRESS_NOTIFICATION = 'BAN'
    API_NAME_CHOICES = (
        (BLOCKCYPHER_ADDRESS_NOTIFICATION, 'blockcypher address notification'),
    )

    created_at = models.DateTimeField(auto_now_add=True, db_index=True)

    # IP and UA of the machine that hit us
    ip_address = models.IPAddressField(null=False, blank=False, db_index=True)
    user_agent = models.CharField(max_length=1024, blank=True, db_index=True)
    api_name = models.CharField(choices=API_NAME_CHOICES, max_length=3, null=False, blank=False, db_index=True)
    hostname = models.CharField(max_length=512, blank=False, null=False, db_index=True)
    request_path = models.CharField(max_length=2048, blank=False, null=False, db_index=True)
    uses_https = models.BooleanField(db_index=True, default=False)
    succeeded = models.BooleanField(db_index=True, default=False)
    data_from_get = JSONField(blank=True, null=True)
    data_from_post = JSONField(blank=True, null=True)

    # optional FKs
    address_subscription = models.ForeignKey('addresses.AddressSubscription', null=True, blank=True)

    def __str__(self):
        return '%s from %s' % (self.id, self.api_name)

    @classmethod
    def log_webhook(cls, request, api_name):
        """Persist an incoming webhook request (metadata plus GET/POST data).

        A malformed JSON body is reported to Sentry and stored as None
        instead of aborting the request.
        """
        try:
            data_from_post = json.loads(request.body.decode())
        except Exception:
            client.captureException()
            data_from_post = None
        return cls.objects.create(
            ip_address=get_client_ip(request),
            user_agent=get_user_agent(request),
            api_name=api_name,
            hostname=request.get_host(),
            request_path=request.path,
            uses_https=request.is_secure(),
            data_from_get=request.GET,
            data_from_post=data_from_post,
        )

    def retry_webhook(self):
        """Debug method (command line only): replay the stored webhook.

        Returns True on a good status code, the response body text on
        failure, or None when no payload was recorded.
        """
        url_to_hit = uri_to_url(self.request_path)
        if self.data_from_get:
            r = requests.get(url_to_hit, params=self.data_from_get)
        elif self.data_from_post:
            # Bug fix: the JSON payload belongs in the request body (data=),
            # not in the URL query string (params=).
            r = requests.post(url_to_hit, data=json.dumps(self.data_from_post))
        else:
            # Bug fix: previously `r` was unbound here, raising NameError.
            return None
        if is_good_status_code(r.status_code):
            return True
        else:
            return r.text
| true | true |
1c33046ed0d758165c4c2cf5afaa8e2465772ca8 | 341 | py | Python | Tests/__init__.py | Jakar510/PythonExtensions | f29600f73454d21345f6da893a1df1b71ddacd0b | [
"MIT"
] | null | null | null | Tests/__init__.py | Jakar510/PythonExtensions | f29600f73454d21345f6da893a1df1b71ddacd0b | [
"MIT"
] | null | null | null | Tests/__init__.py | Jakar510/PythonExtensions | f29600f73454d21345f6da893a1df1b71ddacd0b | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Created by Tyler Stegmaier
# Copyright (c) 2021.
#
# ------------------------------------------------------------------------------
from .SwitchCase import *
from .test_tk import *
from .tests import *
def Run():
    """Discover and run this package's unit tests via unittest's CLI entry point."""
    from unittest import main
    main()
| 22.733333 | 80 | 0.354839 |
from .SwitchCase import *
from .test_tk import *
from .tests import *
def Run():
    """Execute the test suite with the standard unittest runner."""
    import unittest as _unittest
    _unittest.main()
| true | true |
1c33060502174fd1ff63d68472a286c693fcc071 | 249 | py | Python | kader/kader/doctype/custody/custody.py | Muad96/kader | 287d37d110ec2058256d4936d3ef69345781e1a5 | [
"MIT"
] | null | null | null | kader/kader/doctype/custody/custody.py | Muad96/kader | 287d37d110ec2058256d4936d3ef69345781e1a5 | [
"MIT"
] | null | null | null | kader/kader/doctype/custody/custody.py | Muad96/kader | 287d37d110ec2058256d4936d3ef69345781e1a5 | [
"MIT"
] | 1 | 2021-08-01T13:47:36.000Z | 2021-08-01T13:47:36.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, KABCO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Custody(Document):
    """Frappe DocType controller for 'Custody'; relies entirely on the
    framework's default Document behavior (no custom server-side logic)."""
    pass
| 22.636364 | 49 | 0.7751 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Custody(Document):
    """Frappe DocType controller for 'Custody'; no custom server-side logic."""
    pass
| true | true |
1c33065e10b05c1e01c2284f426936a6e62f4a23 | 1,249 | py | Python | python/paddle/fluid/tests/unittests/test_is_integer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 8 | 2016-08-15T07:02:27.000Z | 2016-08-24T09:34:00.000Z | python/paddle/fluid/tests/unittests/test_is_integer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 1 | 2021-11-01T06:28:16.000Z | 2021-11-01T06:28:16.000Z | python/paddle/fluid/tests/unittests/test_is_integer.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 5 | 2021-12-10T11:20:06.000Z | 2022-02-18T05:18:12.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import unittest
class TestIsInteger(unittest.TestCase):
    """Unit tests for paddle.is_integer across tensor dtypes and bad input."""

    def test_for_integer(self):
        # An arange tensor is integer-typed, so is_integer must be True.
        tensor = paddle.arange(10)
        self.assertTrue(paddle.is_integer(tensor))

    def test_for_floating_point(self):
        # Floating-point tensors are not integer.
        tensor = paddle.randn([2, 3])
        self.assertFalse(paddle.is_integer(tensor))

    def test_for_complex(self):
        # Complex tensors are not integer either.
        tensor = paddle.randn([2, 3]) + 1j * paddle.randn([2, 3])
        self.assertFalse(paddle.is_integer(tensor))

    def test_for_exception(self):
        # Non-Tensor inputs (e.g. numpy arrays) must raise TypeError.
        with self.assertRaises(TypeError):
            paddle.is_integer(np.array([1, 2]))
if __name__ == '__main__':
unittest.main()
| 31.225 | 74 | 0.703763 |
import paddle
import numpy as np
import unittest
class TestIsInteger(unittest.TestCase):
    """Unit tests for paddle.is_integer."""

    def test_for_integer(self):
        # arange produces an integer-typed tensor.
        x = paddle.arange(10)
        self.assertTrue(paddle.is_integer(x))

    def test_for_floating_point(self):
        x = paddle.randn([2, 3])
        self.assertFalse(paddle.is_integer(x))

    def test_for_complex(self):
        x = paddle.randn([2, 3]) + 1j * paddle.randn([2, 3])
        self.assertFalse(paddle.is_integer(x))

    def test_for_exception(self):
        # Non-Tensor input (numpy array) must raise TypeError.
        with self.assertRaises(TypeError):
            paddle.is_integer(np.array([1, 2]))
if __name__ == '__main__':
unittest.main()
| true | true |
1c33077976c61c4be83e2036907ca191f93bc51d | 11,718 | py | Python | os_brick/tests/encryptors/test_luks.py | ebalduf/os-brick-backports | 6a180265560d5e0bab80e47ac25c15906d1165fb | [
"Apache-2.0"
] | null | null | null | os_brick/tests/encryptors/test_luks.py | ebalduf/os-brick-backports | 6a180265560d5e0bab80e47ac25c15906d1165fb | [
"Apache-2.0"
] | null | null | null | os_brick/tests/encryptors/test_luks.py | ebalduf/os-brick-backports | 6a180265560d5e0bab80e47ac25c15906d1165fb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import mock
import uuid
from castellan.common.objects import symmetric_key as key
from os_brick.encryptors import luks
from os_brick.tests.encryptors import test_cryptsetup
from oslo_concurrency import processutils as putils
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
    """Unit tests for os_brick.encryptors.luks.LuksEncryptor; every
    cryptsetup invocation is mocked via Executor._execute."""

    def _create(self):
        # Hook for the base TestCase: build the encryptor under test.
        return luks.LuksEncryptor(root_helper=self.root_helper,
            connection_info=self.connection_info,
            keymgr=self.keymgr)

    @mock.patch('os_brick.executor.Executor._execute')
    def test_is_luks(self, mock_execute):
        """is_luks() probes the device with `cryptsetup isLuks`."""
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                run_as_root=True, root_helper=self.root_helper,
                check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    @mock.patch('os_brick.encryptors.luks.LOG')
    def test_is_luks_with_error(self, mock_log, mock_execute):
        """A failing `cryptsetup isLuks` is logged as a warning, not raised."""
        error_msg = "Device %s is not a valid LUKS device." % self.dev_path
        mock_execute.side_effect = putils.ProcessExecutionError(
            exit_code=1, stderr=error_msg)
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                run_as_root=True, root_helper=self.root_helper,
                check_exit_code=True),
        ])
        self.assertEqual(1, mock_log.warning.call_count)  # warning logged

    @mock.patch('os_brick.executor.Executor._execute')
    def test__format_volume(self, mock_execute):
        """_format_volume() runs `cryptsetup luksFormat`, key on stdin."""
        self.encryptor._format_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                '--key-file=-', self.dev_path,
                process_input='passphrase',
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True, attempts=3),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test__open_volume(self, mock_execute):
        """_open_volume() maps the device with `cryptsetup luksOpen`."""
        self.encryptor._open_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input='passphrase',
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume(self, mock_execute):
        """Happy path: open the LUKS volume then symlink the mapped device."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        """If luksOpen fails and the device is not LUKS, format it first."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1),  # luksOpen
            putils.ProcessExecutionError(exit_code=1),  # isLuks
            mock.DEFAULT,  # luksFormat
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # ln
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                '--key-file=-', self.dev_path, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_fail(self, mock_execute):
        """If luksOpen fails but the device *is* LUKS, the error propagates."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1),  # luksOpen
            mock.DEFAULT,  # isLuks
        ]
        self.assertRaises(putils.ProcessExecutionError,
            self.encryptor.attach_volume, None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    def test__close_volume(self, mock_execute):
        """detach_volume() tears the mapping down with `cryptsetup luksClose`."""
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper,
                attempts=3, run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_detach_volume(self, mock_execute):
        """Same expectation as test__close_volume via public detach_volume()."""
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper,
                attempts=3, run_as_root=True, check_exit_code=True),
        ])

    def test_get_mangled_passphrase(self):
        # Confirm that a mangled passphrase is provided as per bug#1633518
        unmangled_raw_key = bytes(binascii.unhexlify('0725230b'))
        symmetric_key = key.SymmetricKey('AES', len(unmangled_raw_key) * 8,
            unmangled_raw_key)
        unmangled_encoded_key = symmetric_key.get_encoded()
        self.assertEqual(self.encryptor._get_mangled_passphrase(
            unmangled_encoded_key), '72523b')

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_unmangle_passphrase(self, mock_execute):
        """Recovery path for bug#1633518: a volume keyed with the mangled
        passphrase is opened with it, rekeyed with the correct passphrase,
        and the mangled key slot removed before the final open + symlink."""
        fake_key = '0725230b'
        fake_key_mangled = '72523b'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None, fake_key)
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=2),  # luksOpen
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # luksClose
            mock.DEFAULT,  # luksAddKey
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # luksClose
            mock.DEFAULT,  # luksRemoveKey
            mock.DEFAULT,  # luksOpen
            mock.DEFAULT,  # ln
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key_mangled,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksAddKey', self.dev_path,
                process_input=''.join([fake_key_mangled,
                                       '\n', fake_key,
                                       '\n', fake_key]),
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksRemoveKey', self.dev_path,
                process_input=fake_key_mangled,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
        ], any_order=False)
        self.assertEqual(9, mock_execute.call_count)
| 45.595331 | 78 | 0.602577 |
import binascii
import mock
import uuid
from castellan.common.objects import symmetric_key as key
from os_brick.encryptors import luks
from os_brick.tests.encryptors import test_cryptsetup
from oslo_concurrency import processutils as putils
class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
    """Unit tests for os_brick.encryptors.luks.LuksEncryptor; every
    cryptsetup invocation is mocked via Executor._execute."""

    def _create(self):
        # Hook for the base TestCase: build the encryptor under test.
        return luks.LuksEncryptor(root_helper=self.root_helper,
            connection_info=self.connection_info,
            keymgr=self.keymgr)

    @mock.patch('os_brick.executor.Executor._execute')
    def test_is_luks(self, mock_execute):
        """is_luks() probes the device with `cryptsetup isLuks`."""
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                run_as_root=True, root_helper=self.root_helper,
                check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    @mock.patch('os_brick.encryptors.luks.LOG')
    def test_is_luks_with_error(self, mock_log, mock_execute):
        """A failing `cryptsetup isLuks` is logged as a warning, not raised."""
        error_msg = "Device %s is not a valid LUKS device." % self.dev_path
        mock_execute.side_effect = putils.ProcessExecutionError(
            exit_code=1, stderr=error_msg)
        luks.is_luks(self.root_helper, self.dev_path, execute=mock_execute)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                run_as_root=True, root_helper=self.root_helper,
                check_exit_code=True),
        ])
        # exactly one warning logged
        self.assertEqual(1, mock_log.warning.call_count)

    @mock.patch('os_brick.executor.Executor._execute')
    def test__format_volume(self, mock_execute):
        """_format_volume() runs `cryptsetup luksFormat`, key on stdin."""
        self.encryptor._format_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                '--key-file=-', self.dev_path,
                process_input='passphrase',
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True, attempts=3),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test__open_volume(self, mock_execute):
        """_open_volume() maps the device with `cryptsetup luksOpen`."""
        self.encryptor._open_volume("passphrase")
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input='passphrase',
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume(self, mock_execute):
        """Happy path: open the LUKS volume then symlink the mapped device."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_not_formatted(self, mock_execute):
        """If luksOpen fails and the device is not LUKS, format it first."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        # Call order: luksOpen (fails), isLuks (fails), luksFormat,
        # luksOpen, ln.
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1),
            putils.ProcessExecutionError(exit_code=1),
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', '--batch-mode', 'luksFormat',
                '--key-file=-', self.dev_path, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_fail(self, mock_execute):
        """If luksOpen fails but the device *is* LUKS, the error propagates."""
        fake_key = uuid.uuid4().hex
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = (
            test_cryptsetup.fake__get_key(None, fake_key))
        # luksOpen fails; isLuks succeeds => the original error is re-raised.
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=1),
            mock.DEFAULT,
        ]
        self.assertRaises(putils.ProcessExecutionError,
            self.encryptor.attach_volume, None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
            mock.call('cryptsetup', 'isLuks', '--verbose', self.dev_path,
                root_helper=self.root_helper,
                run_as_root=True, check_exit_code=True),
        ], any_order=False)

    @mock.patch('os_brick.executor.Executor._execute')
    def test__close_volume(self, mock_execute):
        """detach_volume() tears the mapping down with `cryptsetup luksClose`."""
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper,
                attempts=3, run_as_root=True, check_exit_code=True),
        ])

    @mock.patch('os_brick.executor.Executor._execute')
    def test_detach_volume(self, mock_execute):
        """Same expectation as test__close_volume via public detach_volume()."""
        self.encryptor.detach_volume()
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper,
                attempts=3, run_as_root=True, check_exit_code=True),
        ])

    def test_get_mangled_passphrase(self):
        # Confirm a mangled passphrase is produced (see launchpad bug#1633518).
        unmangled_raw_key = bytes(binascii.unhexlify('0725230b'))
        symmetric_key = key.SymmetricKey('AES', len(unmangled_raw_key) * 8,
            unmangled_raw_key)
        unmangled_encoded_key = symmetric_key.get_encoded()
        self.assertEqual(self.encryptor._get_mangled_passphrase(
            unmangled_encoded_key), '72523b')

    @mock.patch('os_brick.executor.Executor._execute')
    def test_attach_volume_unmangle_passphrase(self, mock_execute):
        """Recovery path for bug#1633518: a volume keyed with the mangled
        passphrase is opened with it, rekeyed with the correct passphrase,
        and the mangled key slot removed before the final open + symlink."""
        fake_key = '0725230b'
        fake_key_mangled = '72523b'
        self.encryptor._get_key = mock.MagicMock()
        self.encryptor._get_key.return_value = \
            test_cryptsetup.fake__get_key(None, fake_key)
        # Call order: luksOpen (bad passphrase, exit 2), luksOpen (mangled),
        # luksClose, luksAddKey, luksOpen, luksClose, luksRemoveKey,
        # luksOpen, ln.
        mock_execute.side_effect = [
            putils.ProcessExecutionError(exit_code=2),
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT,
        ]
        self.encryptor.attach_volume(None)
        mock_execute.assert_has_calls([
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key_mangled,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksAddKey', self.dev_path,
                process_input=''.join([fake_key_mangled,
                                       '\n', fake_key,
                                       '\n', fake_key]),
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksClose', self.dev_name,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True, attempts=3),
            mock.call('cryptsetup', 'luksRemoveKey', self.dev_path,
                process_input=fake_key_mangled,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('cryptsetup', 'luksOpen', '--key-file=-', self.dev_path,
                self.dev_name, process_input=fake_key,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
            mock.call('ln', '--symbolic', '--force',
                '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                root_helper=self.root_helper, run_as_root=True,
                check_exit_code=True),
        ], any_order=False)
        self.assertEqual(9, mock_execute.call_count)
| true | true |
1c3307cede4860fba7a140ac76e94cbe6915d805 | 390 | py | Python | USMAN_MURTALA_ABDULLAHI/python assignment.py | Abdulusy/cil-internship-cohort-02 | 32571631567a8fbae24a83acad668ca0bf476f2c | [
"MIT"
] | null | null | null | USMAN_MURTALA_ABDULLAHI/python assignment.py | Abdulusy/cil-internship-cohort-02 | 32571631567a8fbae24a83acad668ca0bf476f2c | [
"MIT"
] | null | null | null | USMAN_MURTALA_ABDULLAHI/python assignment.py | Abdulusy/cil-internship-cohort-02 | 32571631567a8fbae24a83acad668ca0bf476f2c | [
"MIT"
] | null | null | null | import cv2
from cv2 import INTER_AREA

# Load the source image. Bug fix: OpenCV's loader is cv2.imread, not
# cv2.read (which does not exist and raised AttributeError).
img = cv2.imread("mm.jpg")
if img is None:
    # imread returns None (no exception) when the file is missing/unreadable.
    raise FileNotFoundError("mm.jpg could not be read")

# Downscale to 50% of the original width and height.
scale_percent = 0.50
width = int(img.shape[1] * scale_percent)
height = int(img.shape[0] * scale_percent)
dimension = (width, height)

# INTER_AREA is the recommended interpolation when shrinking.
resized = cv2.resize(img, dimension, interpolation=INTER_AREA)
print(resized.shape)

cv2.imshow('outout', resized)
cv2.imwrite('resized_mm.jpg', resized)
cv2.waitKey(0)  # block until a key press, then close the preview window
cv2.destroyAllWindows()
| 24.375 | 59 | 0.75641 | import cv2
import cv2
from cv2 import INTER_AREA

# Downscale mm.jpg to 50% and display/save the result.
# BUG FIX: cv2.read does not exist; cv2.imread is the image-loading function.
img = cv2.imread("mm.jpg")
scale_percent = 0.50
width = int(img.shape[1] * scale_percent)
height = int(img.shape[0] * scale_percent)
dimension = (width, height)
resized = cv2.resize(img, dimension, interpolation=INTER_AREA)
print(resized.shape)
cv2.imshow('outout', resized)
cv2.imwrite('resized_mm.jpg', resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true | true |
1c33086347508a4ad3a1ba5a83f4b8ecd0df0c04 | 268 | py | Python | tests/artificial/transf_Integration/trend_LinearTrend/cycle_5/ar_/test_artificial_32_Integration_LinearTrend_5__100.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Integration/trend_LinearTrend/cycle_5/ar_/test_artificial_32_Integration_LinearTrend_5__100.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Integration/trend_LinearTrend/cycle_5/ar_/test_artificial_32_Integration_LinearTrend_5__100.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0); | 38.285714 | 168 | 0.735075 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0); | true | true |
1c330a28529e1fc48cb5c5a2804c113dc5d0059d | 1,954 | py | Python | lib/gcloud/logging/connection.py | huangkuan/hack | 433e213915749d9c510abf3c7462d5256b5e37eb | [
"Apache-2.0"
] | 1 | 2019-02-06T10:58:11.000Z | 2019-02-06T10:58:11.000Z | lib/gcloud/logging/connection.py | huangkuan/hack | 433e213915749d9c510abf3c7462d5256b5e37eb | [
"Apache-2.0"
] | 4 | 2017-10-24T21:47:53.000Z | 2019-09-22T13:12:57.000Z | lib/gcloud/logging/connection.py | huangkuan/hack | 433e213915749d9c510abf3c7462d5256b5e37eb | [
"Apache-2.0"
] | 2 | 2017-02-09T16:25:27.000Z | 2017-10-24T21:40:42.000Z | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with gcloud logging connections."""
from gcloud import connection as base_connection
class Connection(base_connection.JSONConnection):
    """JSON-over-REST connection to the Google Cloud Logging service.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials`
    :param credentials: (Optional) OAuth2 credentials used to authorize
                        requests made over this connection.

    :type http: :class:`httplib2.Http` or class that defines ``request()``.
    :param http: (Optional) object used to issue HTTP requests.

    :type api_base_url: string
    :param api_base_url: (Optional) Base of the API call URL; defaults to
                         :attr:`Connection.API_BASE_URL`.
    """

    # Root endpoint for every Cloud Logging API call.
    API_BASE_URL = 'https://logging.googleapis.com'

    # Version segment inserted into request URLs.
    API_VERSION = 'v2beta1'

    # Format string combined with the two values above to build call URLs.
    API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'

    # OAuth2 scopes required to act as a Cloud Logging consumer.
    SCOPE = ('https://www.googleapis.com/auth/logging.read',
             'https://www.googleapis.com/auth/logging.write',
             'https://www.googleapis.com/auth/logging.admin',
             'https://www.googleapis.com/auth/cloud-platform')
| 39.877551 | 77 | 0.697544 |
from gcloud import connection as base_connection
class Connection(base_connection.JSONConnection):
API_BASE_URL = 'https://logging.googleapis.com'
API_VERSION = 'v2beta1'
API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'
SCOPE = ('https://www.googleapis.com/auth/logging.read',
'https://www.googleapis.com/auth/logging.write',
'https://www.googleapis.com/auth/logging.admin',
'https://www.googleapis.com/auth/cloud-platform')
| true | true |
1c330af6422c580419cba5ae220b45442bd3d372 | 24,375 | py | Python | praw/models/reddit/submission.py | RichardHoekstra/praw | 04c3056d146ab905d6b58d6502b8f4d1d9fb4435 | [
"BSD-2-Clause"
] | null | null | null | praw/models/reddit/submission.py | RichardHoekstra/praw | 04c3056d146ab905d6b58d6502b8f4d1d9fb4435 | [
"BSD-2-Clause"
] | null | null | null | praw/models/reddit/submission.py | RichardHoekstra/praw | 04c3056d146ab905d6b58d6502b8f4d1d9fb4435 | [
"BSD-2-Clause"
] | null | null | null | """Provide the Submission class."""
from typing import Any, Dict, List, Optional, TypeVar, Union
from urllib.parse import urljoin
from prawcore import Conflict
from ...const import API_PATH
from ...exceptions import InvalidURL
from ...util.cache import cachedproperty
from ..comment_forest import CommentForest
from ..listing.listing import Listing
from ..listing.mixins import SubmissionListingMixin
from .base import RedditBase
from .mixins import FullnameMixin, ThingModerationMixin, UserContentMixin
from .redditor import Redditor
from .subreddit import Subreddit
_Submission = TypeVar("_Submission")
Reddit = TypeVar("Reddit")
class SubmissionFlair:
    """Interact with the flair of a single :class:`.Submission`."""

    def __init__(self, submission: _Submission):
        """Store the submission whose flair will be managed.

        :param submission: The submission associated with the flair functions.
        """
        self.submission = submission

    def choices(self) -> List[Dict[str, Union[bool, list, str]]]:
        """Return the list of flair choices available for the submission.

        An item from this list is required in order to call :meth:`.select`.

        Usage:

        .. code-block:: python

           choices = submission.flair.choices()

        """
        endpoint = API_PATH["flairselector"].format(
            subreddit=self.submission.subreddit
        )
        response = self.submission._reddit.post(
            endpoint, data={"link": self.submission.fullname}
        )
        return response["choices"]

    def select(self, flair_template_id: str, text: Optional[str] = None):
        """Apply a flair choice to the submission.

        :param flair_template_id: The flair template to select. The possible
            ``flair_template_id`` values can be discovered through
            :meth:`.choices`.
        :param text: If the template's ``flair_text_editable`` value is True,
            this value will set a custom text (default: None).

        For example, to select an arbitrary editable flair text (assuming there
        is one) and set a custom value try:

        .. code-block:: python

           choices = submission.flair.choices()
           template_id = next(x for x in choices
                              if x['flair_text_editable'])['flair_template_id']
           submission.flair.select(template_id, 'my custom value')

        """
        endpoint = API_PATH["select_flair"].format(
            subreddit=self.submission.subreddit
        )
        payload = {
            "flair_template_id": flair_template_id,
            "link": self.submission.fullname,
            "text": text,
        }
        self.submission._reddit.post(endpoint, data=payload)
class SubmissionModeration(ThingModerationMixin):
    """Provide a set of functions pertaining to Submission moderation.

    Example usage:

    .. code-block:: python

       submission = reddit.submission(id="8dmv8z")
       submission.mod.approve()

    """

    # NOTE(review): presumably consumed by ThingModerationMixin to pick the
    # link-specific removal-message endpoint -- confirm against the mixin.
    REMOVAL_MESSAGE_API = "removal_link_message"

    def __init__(self, submission: _Submission):
        """Create a SubmissionModeration instance.

        :param submission: The submission to moderate.
        """
        self.thing = submission

    def contest_mode(self, state: bool = True):
        """Set contest mode for the comments of this submission.

        :param state: (boolean) True enables contest mode, False disables
            (default: True).

        Contest mode has the following effects:

        * The comment thread will default to being sorted randomly.
        * Replies to top-level comments will be hidden behind
          "[show replies]" buttons.
        * Scores will be hidden from non-moderators.
        * Scores accessed through the API (mobile apps, bots) will be
          obscured to "1" for non-moderators.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mod.contest_mode(state=True)

        """
        self.thing._reddit.post(
            API_PATH["contest_mode"],
            data={"id": self.thing.fullname, "state": state},
        )

    def flair(
        self,
        text: str = "",
        css_class: str = "",
        flair_template_id: Optional[str] = None,
    ):
        """Set flair for the submission.

        :param text: The flair text to associate with the Submission (default:
            '').
        :param css_class: The css class to associate with the flair html
            (default: '').
        :param flair_template_id: The flair template id to use when flairing
            (Optional).

        This method can only be used by an authenticated user who is a
        moderator of the Submission's Subreddit.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mod.flair(text='PRAW', css_class='bot')

        """
        data = {
            "css_class": css_class,
            "link": self.thing.fullname,
            "text": text,
        }
        url = API_PATH["flair"].format(subreddit=self.thing.subreddit)
        # Template-based flairing uses a different endpoint than free-form
        # text/css flairing, so the URL is swapped when a template id is given.
        if flair_template_id is not None:
            data["flair_template_id"] = flair_template_id
            url = API_PATH["select_flair"].format(
                subreddit=self.thing.subreddit
            )
        self.thing._reddit.post(url, data=data)

    def nsfw(self):
        """Mark as not safe for work.

        This method can be used both by the submission author and moderators of
        the subreddit that the submission belongs to.

        Example usage:

        .. code-block:: python

           submission = reddit.subreddit('test').submit('nsfw test',
                                                        selftext='nsfw')
           submission.mod.nsfw()

        See also :meth:`~.sfw`

        """
        self.thing._reddit.post(
            API_PATH["marknsfw"], data={"id": self.thing.fullname}
        )

    def set_original_content(self):
        """Mark as original content.

        This method can be used by moderators of the subreddit that the
        submission belongs to. If the subreddit has enabled the Original
        Content beta feature in settings, then the submission's author
        can use it as well.

        Example usage:

        .. code-block:: python

           submission = reddit.subreddit('test').submit('oc test',
                                                        selftext='original')
           submission.mod.set_original_content()

        See also :meth:`.unset_original_content`

        """
        # Same endpoint as unset_original_content; only should_set_oc differs.
        data = {
            "id": self.thing.id,
            "fullname": self.thing.fullname,
            "should_set_oc": True,
            "executed": False,
            "r": self.thing.subreddit,
        }
        self.thing._reddit.post(API_PATH["set_original_content"], data=data)

    def sfw(self):
        """Mark as safe for work.

        This method can be used both by the submission author and moderators of
        the subreddit that the submission belongs to.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mod.sfw()

        See also :meth:`~.nsfw`

        """
        self.thing._reddit.post(
            API_PATH["unmarknsfw"], data={"id": self.thing.fullname}
        )

    def spoiler(self):
        """Indicate that the submission contains spoilers.

        This method can be used both by the submission author and moderators of
        the subreddit that the submission belongs to.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mod.spoiler()

        See also :meth:`~.unspoiler`

        """
        self.thing._reddit.post(
            API_PATH["spoiler"], data={"id": self.thing.fullname}
        )

    def sticky(
        self, state: bool = True, bottom: bool = True,
    ):
        """Set the submission's sticky state in its subreddit.

        :param state: (boolean) True sets the sticky for the submission, false
            unsets (default: True).
        :param bottom: (boolean) When true, set the submission as the bottom
            sticky. If no top sticky exists, this submission will become the
            top sticky regardless (default: True).

        .. note:: When a submission is stickied two or more times, the Reddit
            API responds with a 409 error that is raised as a ``Conflict`` by
            PRAWCore. The method suppresses these ``Conflict`` errors.

        This submission will replace the second stickied submission if one
        exists.

        For example:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mod.sticky()

        """
        data = {"id": self.thing.fullname, "state": state}
        # num=1 requests the top sticky slot; when omitted the bottom slot is
        # used (see the ``bottom`` parameter above).
        if not bottom:
            data["num"] = 1
        try:
            return self.thing._reddit.post(
                API_PATH["sticky_submission"], data=data
            )
        except Conflict:
            # Already stickied: Reddit answers 409 -- treat as a no-op.
            pass

    def suggested_sort(self, sort: str = "blank"):
        """Set the suggested sort for the comments of the submission.

        :param sort: Can be one of: confidence, top, new, controversial, old,
            random, qa, blank (default: blank).
        """
        self.thing._reddit.post(
            API_PATH["suggested_sort"],
            data={"id": self.thing.fullname, "sort": sort},
        )

    def unset_original_content(self):
        """Indicate that the submission is not original content.

        This method can be used by moderators of the subreddit that the
        submission belongs to. If the subreddit has enabled the Original
        Content beta feature in settings, then the submission's author
        can use it as well.

        Example usage:

        .. code-block:: python

           submission = reddit.subreddit('test').submit('oc test',
                                                        selftext='original')
           submission.mod.unset_original_content()

        See also :meth:`.set_original_content`

        """
        # Same endpoint as set_original_content; only should_set_oc differs.
        data = {
            "id": self.thing.id,
            "fullname": self.thing.fullname,
            "should_set_oc": False,
            "executed": False,
            "r": self.thing.subreddit,
        }
        self.thing._reddit.post(API_PATH["set_original_content"], data=data)

    def unspoiler(self):
        """Indicate that the submission does not contain spoilers.

        This method can be used both by the submission author and moderators of
        the subreddit that the submission belongs to.

        For example:

        .. code-block:: python

           submission = reddit.subreddit('test').submit('not spoiler',
                                                        selftext='spoiler')
           submission.mod.unspoiler()

        See also :meth:`~.spoiler`

        """
        self.thing._reddit.post(
            API_PATH["unspoiler"], data={"id": self.thing.fullname}
        )
class Submission(
    SubmissionListingMixin, UserContentMixin, FullnameMixin, RedditBase
):
    """A class for submissions to reddit.

    **Typical Attributes**

    This table describes attributes that typically belong to objects of this
    class. Since attributes are dynamically provided (see
    :ref:`determine-available-attributes-of-an-object`), there is not a
    guarantee that these attributes will always be present, nor is this list
    comprehensive in any way.

    =========================== ===============================================
    Attribute                   Description
    =========================== ===============================================
    ``author``                  Provides an instance of :class:`.Redditor`.
    ``clicked``                 Whether or not the submission has been clicked
                                by the client.
    ``comments``                Provides an instance of
                                :class:`.CommentForest`.
    ``created_utc``             Time the submission was created, represented in
                                `Unix Time`_.
    ``distinguished``           Whether or not the submission is distinguished.
    ``edited``                  Whether or not the submission has been edited.
    ``id``                      ID of the submission.
    ``is_original_content``     Whether or not the submission has been set
                                as original content.
    ``is_self``                 Whether or not the submission is a selfpost
                                (text-only).
    ``link_flair_template_id``  The link flair's ID, or None if not flaired.
    ``link_flair_text``         The link flair's text content, or None if not
                                flaired.
    ``locked``                  Whether or not the submission has been locked.
    ``name``                    Fullname of the submission.
    ``num_comments``            The number of comments on the submission.
    ``over_18``                 Whether or not the submission has been marked
                                as NSFW.
    ``permalink``               A permalink for the submission.
    ``score``                   The number of upvotes for the submission.
    ``selftext``                The submissions' selftext - an empty string if
                                a link post.
    ``spoiler``                 Whether or not the submission has been marked
                                as a spoiler.
    ``stickied``                Whether or not the submission is stickied.
    ``subreddit``               Provides an instance of :class:`.Subreddit`.
    ``title``                   The title of the submission.
    ``upvote_ratio``            The percentage of upvotes from all votes on the
                                submission.
    ``url``                     The URL the submission links to, or the
                                permalink if a selfpost.
    =========================== ===============================================

    .. _Unix Time: https://en.wikipedia.org/wiki/Unix_time

    """

    STR_FIELD = "id"

    @staticmethod
    def id_from_url(url: str) -> str:
        """Return the ID contained within a submission URL.

        :param url: A url to a submission in one of the following formats (http
            urls will also work):

            * https://redd.it/2gmzqe
            * https://reddit.com/comments/2gmzqe/
            * https://www.reddit.com/r/redditdev/comments/2gmzqe/praw_https/

        Raise :class:`.InvalidURL` if URL is not a valid submission URL.
        """
        parts = RedditBase._url_parts(url)
        if "comments" not in parts:
            # Shortlink form (e.g. redd.it/<id>): the ID is the last segment.
            submission_id = parts[-1]
            if "r" in parts:
                raise InvalidURL(
                    url, message="Invalid URL (subreddit, not submission): {}",
                )
        else:
            submission_id = parts[parts.index("comments") + 1]
        if not submission_id.isalnum():
            raise InvalidURL(url)
        return submission_id

    @property
    def _kind(self):
        """Return the class's kind."""
        return self._reddit.config.kinds["submission"]

    @property
    def comments(self) -> CommentForest:
        """Provide an instance of :class:`.CommentForest`.

        This attribute can use used, for example, to obtain a flat list of
        comments, with any :class:`.MoreComments` removed:

        .. code-block:: python

           submission.comments.replace_more(limit=0)
           comments = submission.comments.list()

        Sort order and comment limit can be set with the ``comment_sort`` and
        ``comment_limit`` attributes before comments are fetched, including
        any call to :meth:`.replace_more`:

        .. code-block:: python

           submission.comment_sort = 'new'
           comments = submission.comments.list()

        .. note:: The appropriate values for ``comment_sort`` include
            ``confidence``, ``controversial``, ``new``, ``old``, ``q&a``,
            and ``top``

        See :ref:`extracting_comments` for more on working with a
        :class:`.CommentForest`.

        """
        # This assumes _comments is set so that _fetch is called when it's not.
        return self._comments

    @cachedproperty
    def flair(self) -> SubmissionFlair:
        """Provide an instance of :class:`.SubmissionFlair`.

        This attribute is used to work with flair as a regular user of the
        subreddit the submission belongs to. Moderators can directly use
        :meth:`.flair`.

        For example, to select an arbitrary editable flair text (assuming there
        is one) and set a custom value try:

        .. code-block:: python

           choices = submission.flair.choices()
           template_id = next(x for x in choices
                              if x['flair_text_editable'])['flair_template_id']
           submission.flair.select(template_id, 'my custom value')

        """
        return SubmissionFlair(self)

    @cachedproperty
    def mod(self) -> SubmissionModeration:
        """Provide an instance of :class:`.SubmissionModeration`.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id="8dmv8z")
           submission.mod.approve()

        """
        return SubmissionModeration(self)

    @property
    def shortlink(self) -> str:
        """Return a shortlink to the submission.

        For example http://redd.it/eorhm is a shortlink for
        https://www.reddit.com/r/announcements/comments/eorhm/reddit_30_less_typing/.

        """
        return urljoin(self._reddit.config.short_url, self.id)

    def __init__(
        self,
        reddit: Reddit,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        url: Optional[str] = None,
        _data: Optional[Dict[str, Any]] = None,
    ):
        """Initialize a Submission instance.

        :param reddit: An instance of :class:`~.Reddit`.
        :param id: A reddit base36 submission ID, e.g., ``2gmzqe``.
        :param url: A URL supported by
            :meth:`~praw.models.Submission.id_from_url`.

        Either ``id`` or ``url`` can be provided, but not both.
        """
        if (id, url, _data).count(None) != 2:
            raise TypeError(
                "Exactly one of `id`, `url`, or `_data` must be provided."
            )
        super().__init__(reddit, _data=_data)
        self.comment_limit = 2048

        # Specify the sort order for ``comments``
        self.comment_sort = "confidence"

        if id is not None:
            self.id = id
        elif url is not None:
            self.id = self.id_from_url(url)

        self._comments_by_id = {}

    def __setattr__(self, attribute: str, value: Any):
        """Objectify the ``author`` and ``subreddit`` attributes."""
        if attribute == "author":
            value = Redditor.from_data(self._reddit, value)
        elif attribute == "subreddit":
            value = Subreddit(self._reddit, value)
        super().__setattr__(attribute, value)

    def _chunk(self, other_submissions, chunk_size):
        """Yield comma-joined fullnames in groups of at most ``chunk_size``.

        :param other_submissions: Optional iterable of additional submissions
            whose fullnames are included after this submission's.
        :param chunk_size: Maximum number of fullnames per yielded string.
        """
        all_submissions = [self.fullname]
        if other_submissions:
            all_submissions += [x.fullname for x in other_submissions]

        for position in range(0, len(all_submissions), chunk_size):
            # BUG FIX: the slice previously used a hard-coded ``50`` instead
            # of ``chunk_size``, which would misgroup or duplicate items for
            # any chunk size other than 50 (current callers pass 50, so this
            # is behavior-preserving for them).
            yield ",".join(
                all_submissions[position : position + chunk_size]
            )

    def _fetch_info(self):
        """Return the (name, fields, params) triple used to build the URL."""
        return (
            "submission",
            {"id": self.id},
            {"limit": self.comment_limit, "sort": self.comment_sort},
        )

    def _fetch_data(self):
        """Issue the GET request for this submission and return the payload."""
        name, fields, params = self._fetch_info()
        path = API_PATH[name].format(**fields)
        return self._reddit.request("GET", path, params)

    def _fetch(self):
        """Fetch the submission and its comments, then hydrate ``self``."""
        data = self._fetch_data()
        submission_listing, comment_listing = data
        comment_listing = Listing(self._reddit, _data=comment_listing["data"])

        submission_data = submission_listing["data"]["children"][0]["data"]
        submission = type(self)(self._reddit, _data=submission_data)
        # Drop the fetch-only knobs so they don't shadow this instance's.
        delattr(submission, "comment_limit")
        delattr(submission, "comment_sort")
        submission._comments = CommentForest(self)

        self.__dict__.update(submission.__dict__)
        self.comments._update(comment_listing.children)
        self._fetched = True

    def mark_visited(self):
        """Mark submission as visited.

        This method requires a subscription to reddit premium.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.mark_visited()

        """
        data = {"links": self.fullname}
        self._reddit.post(API_PATH["store_visits"], data=data)

    def hide(self, other_submissions: Optional[List[_Submission]] = None):
        """Hide Submission.

        :param other_submissions: When provided, additionally
            hide this list of :class:`.Submission` instances
            as part of a single request (default: None).

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.hide()

        See also :meth:`~.unhide`

        """
        for submissions in self._chunk(other_submissions, 50):
            self._reddit.post(API_PATH["hide"], data={"id": submissions})

    def unhide(self, other_submissions: Optional[List[_Submission]] = None):
        """Unhide Submission.

        :param other_submissions: When provided, additionally
            unhide this list of :class:`.Submission` instances
            as part of a single request (default: None).

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           submission.unhide()

        See also :meth:`~.hide`

        """
        for submissions in self._chunk(other_submissions, 50):
            self._reddit.post(API_PATH["unhide"], data={"id": submissions})

    def crosspost(
        self,
        subreddit: Subreddit,
        title: Optional[str] = None,
        send_replies: bool = True,
        flair_id: Optional[str] = None,
        flair_text: Optional[str] = None,
        nsfw: bool = False,
        spoiler: bool = False,
    ) -> _Submission:
        """Crosspost the submission to a subreddit.

        .. note:: Be aware you have to be subscribed to the target subreddit.

        :param subreddit: Name of the subreddit or :class:`~.Subreddit`
            object to crosspost into.
        :param title: Title of the submission. Will use this submission's
            title if `None` (default: None).
        :param flair_id: The flair template to select (default: None).
        :param flair_text: If the template's ``flair_text_editable`` value is
            True, this value will set a custom text (default: None).
        :param send_replies: When True, messages will be sent to the
            submission author when comments are made to the submission
            (default: True).
        :param nsfw: Whether or not the submission should be marked NSFW
            (default: False).
        :param spoiler: Whether or not the submission should be marked as
            a spoiler (default: False).
        :returns: A :class:`~.Submission` object for the newly created
            submission.

        Example usage:

        .. code-block:: python

           submission = reddit.submission(id='5or86n')
           cross_post = submission.crosspost(subreddit="learnprogramming",
                                             send_replies=False)

        See also :meth:`~.hide`

        """
        if title is None:
            title = self.title
        data = {
            "sr": str(subreddit),
            "title": title,
            "sendreplies": bool(send_replies),
            "kind": "crosspost",
            "crosspost_fullname": self.fullname,
            "nsfw": bool(nsfw),
            "spoiler": bool(spoiler),
        }
        # Flair fields are optional; only include the ones the caller set.
        for key, value in (("flair_id", flair_id), ("flair_text", flair_text)):
            if value is not None:
                data[key] = value
        return self._reddit.post(API_PATH["submit"], data=data)
Subreddit._submission_class = Submission
| 33.667127 | 85 | 0.581538 | from typing import Any, Dict, List, Optional, TypeVar, Union
from urllib.parse import urljoin
from prawcore import Conflict
from ...const import API_PATH
from ...exceptions import InvalidURL
from ...util.cache import cachedproperty
from ..comment_forest import CommentForest
from ..listing.listing import Listing
from ..listing.mixins import SubmissionListingMixin
from .base import RedditBase
from .mixins import FullnameMixin, ThingModerationMixin, UserContentMixin
from .redditor import Redditor
from .subreddit import Subreddit
_Submission = TypeVar("_Submission")
Reddit = TypeVar("Reddit")
class SubmissionFlair:
def __init__(self, submission: _Submission):
self.submission = submission
def choices(self) -> List[Dict[str, Union[bool, list, str]]]:
url = API_PATH["flairselector"].format(
subreddit=self.submission.subreddit
)
return self.submission._reddit.post(
url, data={"link": self.submission.fullname}
)["choices"]
def select(self, flair_template_id: str, text: Optional[str] = None):
data = {
"flair_template_id": flair_template_id,
"link": self.submission.fullname,
"text": text,
}
url = API_PATH["select_flair"].format(
subreddit=self.submission.subreddit
)
self.submission._reddit.post(url, data=data)
class SubmissionModeration(ThingModerationMixin):
REMOVAL_MESSAGE_API = "removal_link_message"
def __init__(self, submission: _Submission):
self.thing = submission
def contest_mode(self, state: bool = True):
self.thing._reddit.post(
API_PATH["contest_mode"],
data={"id": self.thing.fullname, "state": state},
)
def flair(
self,
text: str = "",
css_class: str = "",
flair_template_id: Optional[str] = None,
):
data = {
"css_class": css_class,
"link": self.thing.fullname,
"text": text,
}
url = API_PATH["flair"].format(subreddit=self.thing.subreddit)
if flair_template_id is not None:
data["flair_template_id"] = flair_template_id
url = API_PATH["select_flair"].format(
subreddit=self.thing.subreddit
)
self.thing._reddit.post(url, data=data)
def nsfw(self):
self.thing._reddit.post(
API_PATH["marknsfw"], data={"id": self.thing.fullname}
)
def set_original_content(self):
data = {
"id": self.thing.id,
"fullname": self.thing.fullname,
"should_set_oc": True,
"executed": False,
"r": self.thing.subreddit,
}
self.thing._reddit.post(API_PATH["set_original_content"], data=data)
def sfw(self):
self.thing._reddit.post(
API_PATH["unmarknsfw"], data={"id": self.thing.fullname}
)
def spoiler(self):
self.thing._reddit.post(
API_PATH["spoiler"], data={"id": self.thing.fullname}
)
def sticky(
self, state: bool = True, bottom: bool = True,
):
data = {"id": self.thing.fullname, "state": state}
if not bottom:
data["num"] = 1
try:
return self.thing._reddit.post(
API_PATH["sticky_submission"], data=data
)
except Conflict:
pass
def suggested_sort(self, sort: str = "blank"):
self.thing._reddit.post(
API_PATH["suggested_sort"],
data={"id": self.thing.fullname, "sort": sort},
)
def unset_original_content(self):
data = {
"id": self.thing.id,
"fullname": self.thing.fullname,
"should_set_oc": False,
"executed": False,
"r": self.thing.subreddit,
}
self.thing._reddit.post(API_PATH["set_original_content"], data=data)
def unspoiler(self):
self.thing._reddit.post(
API_PATH["unspoiler"], data={"id": self.thing.fullname}
)
class Submission(
SubmissionListingMixin, UserContentMixin, FullnameMixin, RedditBase
):
STR_FIELD = "id"
@staticmethod
def id_from_url(url: str) -> str:
parts = RedditBase._url_parts(url)
if "comments" not in parts:
submission_id = parts[-1]
if "r" in parts:
raise InvalidURL(
url, message="Invalid URL (subreddit, not submission): {}",
)
else:
submission_id = parts[parts.index("comments") + 1]
if not submission_id.isalnum():
raise InvalidURL(url)
return submission_id
@property
def _kind(self):
return self._reddit.config.kinds["submission"]
@property
def comments(self) -> CommentForest:
return self._comments
@cachedproperty
def flair(self) -> SubmissionFlair:
return SubmissionFlair(self)
@cachedproperty
def mod(self) -> SubmissionModeration:
return SubmissionModeration(self)
@property
def shortlink(self) -> str:
return urljoin(self._reddit.config.short_url, self.id)
def __init__(
self,
reddit: Reddit,
id: Optional[str] = None, # pylint: disable=redefined-builtin
url: Optional[str] = None,
_data: Optional[Dict[str, Any]] = None,
):
if (id, url, _data).count(None) != 2:
raise TypeError(
"Exactly one of `id`, `url`, or `_data` must be provided."
)
super().__init__(reddit, _data=_data)
self.comment_limit = 2048
# Specify the sort order for ``comments``
self.comment_sort = "confidence"
if id is not None:
self.id = id
elif url is not None:
self.id = self.id_from_url(url)
self._comments_by_id = {}
def __setattr__(self, attribute: str, value: Any):
if attribute == "author":
value = Redditor.from_data(self._reddit, value)
elif attribute == "subreddit":
value = Subreddit(self._reddit, value)
super().__setattr__(attribute, value)
def _chunk(self, other_submissions, chunk_size):
all_submissions = [self.fullname]
if other_submissions:
all_submissions += [x.fullname for x in other_submissions]
for position in range(0, len(all_submissions), chunk_size):
yield ",".join(all_submissions[position : position + 50])
def _fetch_info(self):
return (
"submission",
{"id": self.id},
{"limit": self.comment_limit, "sort": self.comment_sort},
)
def _fetch_data(self):
name, fields, params = self._fetch_info()
path = API_PATH[name].format(**fields)
return self._reddit.request("GET", path, params)
def _fetch(self):
data = self._fetch_data()
submission_listing, comment_listing = data
comment_listing = Listing(self._reddit, _data=comment_listing["data"])
submission_data = submission_listing["data"]["children"][0]["data"]
submission = type(self)(self._reddit, _data=submission_data)
delattr(submission, "comment_limit")
delattr(submission, "comment_sort")
submission._comments = CommentForest(self)
self.__dict__.update(submission.__dict__)
self.comments._update(comment_listing.children)
self._fetched = True
def mark_visited(self):
data = {"links": self.fullname}
self._reddit.post(API_PATH["store_visits"], data=data)
def hide(self, other_submissions: Optional[List[_Submission]] = None):
for submissions in self._chunk(other_submissions, 50):
self._reddit.post(API_PATH["hide"], data={"id": submissions})
def unhide(self, other_submissions: Optional[List[_Submission]] = None):
for submissions in self._chunk(other_submissions, 50):
self._reddit.post(API_PATH["unhide"], data={"id": submissions})
def crosspost(
self,
subreddit: Subreddit,
title: Optional[str] = None,
send_replies: bool = True,
flair_id: Optional[str] = None,
flair_text: Optional[str] = None,
nsfw: bool = False,
spoiler: bool = False,
) -> _Submission:
if title is None:
title = self.title
data = {
"sr": str(subreddit),
"title": title,
"sendreplies": bool(send_replies),
"kind": "crosspost",
"crosspost_fullname": self.fullname,
"nsfw": bool(nsfw),
"spoiler": bool(spoiler),
}
for key, value in (("flair_id", flair_id), ("flair_text", flair_text)):
if value is not None:
data[key] = value
return self._reddit.post(API_PATH["submit"], data=data)
Subreddit._submission_class = Submission
| true | true |
1c330b0f0181bf6c532f368e8052f97542502d53 | 79,228 | py | Python | src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py | Arkanayan/azure-cli-extensions | b45ca4061e0428d6438e0a8fb9436994f0205949 | [
"MIT"
] | 4 | 2019-08-20T02:58:03.000Z | 2020-05-22T10:23:11.000Z | src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py | Arkanayan/azure-cli-extensions | b45ca4061e0428d6438e0a8fb9436994f0205949 | [
"MIT"
] | 4 | 2019-08-29T07:20:57.000Z | 2021-12-21T10:04:53.000Z | src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py | Arkanayan/azure-cli-extensions | b45ca4061e0428d6438e0a8fb9436994f0205949 | [
"MIT"
] | 4 | 2019-08-29T02:52:14.000Z | 2021-09-14T10:41:48.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import importlib
import unittest
from unittest.mock import Mock, patch
import requests
from azext_aks_preview.__init__ import register_aks_preview_resource_type
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from azext_aks_preview._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_GITOPS_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_ROTATION_POLL_INTERVAL,
CONST_SECRET_ROTATION_ENABLED,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from azext_aks_preview.decorator import (
AKSPreviewContext,
AKSPreviewCreateDecorator,
AKSPreviewModels,
AKSPreviewUpdateDecorator,
)
from azext_aks_preview.tests.latest.mocks import MockCLI, MockClient, MockCmd
from azext_aks_preview.tests.latest.test_aks_commands import _get_test_data_file
from azure.cli.command_modules.acs._consts import (
DecoratorEarlyExitException,
DecoratorMode,
)
from azure.cli.core.azclierror import (
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
)
from msrestazure.azure_exceptions import CloudError
class AKSPreviewModelsTestCase(unittest.TestCase):
    """Verify AKSPreviewModels resolves every model class from the versioned SDK."""

    def setUp(self):
        """Register the preview resource type and build a mock CLI command."""
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)

    def test_models(self):
        """Each attribute on AKSPreviewModels must be the class exported by the
        vendored SDK module for the profile's default API version."""
        models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
        # load models directly (instead of through the `get_sdk` method provided by the cli component)
        from azure.cli.core.profiles._shared import AZURE_API_PROFILES

        sdk_profile = AZURE_API_PROFILES["latest"][CUSTOM_MGMT_AKS_PREVIEW]
        api_version = sdk_profile.default_api_version
        # API versions are dashed (e.g. 2021-09-01) but module names use underscores.
        module_name = "azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.v{}.models".format(
            api_version.replace("-", "_")
        )
        module = importlib.import_module(module_name)

        self.assertEqual(models.KubeletConfig, getattr(module, "KubeletConfig"))
        self.assertEqual(models.LinuxOSConfig, getattr(module, "LinuxOSConfig"))
        self.assertEqual(
            models.ManagedClusterHTTPProxyConfig,
            getattr(module, "ManagedClusterHTTPProxyConfig"),
        )
        self.assertEqual(
            models.ManagedClusterPodIdentityProfile,
            getattr(module, "ManagedClusterPodIdentityProfile"),
        )
        self.assertEqual(
            models.WindowsGmsaProfile, getattr(module, "WindowsGmsaProfile")
        )
        self.assertEqual(models.CreationData, getattr(module, "CreationData"))
        # nat gateway models are looked up via a dict rather than attributes
        self.assertEqual(
            models.nat_gateway_models.get("ManagedClusterNATGatewayProfile"),
            getattr(module, "ManagedClusterNATGatewayProfile"),
        )
        self.assertEqual(
            models.nat_gateway_models.get(
                "ManagedClusterManagedOutboundIPProfile"
            ),
            getattr(module, "ManagedClusterManagedOutboundIPProfile"),
        )
class AKSPreviewContextTestCase(unittest.TestCase):
def setUp(self):
    """Register the preview resource type and build shared cmd/model fixtures."""
    # manually register CUSTOM_MGMT_AKS_PREVIEW
    register_aks_preview_resource_type()
    self.cli_ctx = MockCLI()
    self.cmd = MockCmd(self.cli_ctx)
    self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
def test__get_vm_set_type(self):
# default & dynamic completion
ctx_1 = AKSPreviewContext(
self.cmd,
{
"vm_set_type": None,
"kubernetes_version": "",
"enable_vmss": False,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1._get_vm_set_type(read_only=True), None)
self.assertEqual(ctx_1.get_vm_set_type(), "VirtualMachineScaleSets")
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_ap_name", type="test_mc_vm_set_type"
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_vm_set_type(), "test_mc_vm_set_type")
# custom value & dynamic completion
ctx_2 = AKSPreviewContext(
self.cmd,
{
"vm_set_type": "availabilityset",
"kubernetes_version": "",
"enable_vmss": True,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid vm_set_type when enable_vmss is specified
with self.assertRaises(InvalidArgumentValueError):
self.assertEqual(ctx_2.get_vm_set_type(), "AvailabilitySet")
# custom value & dynamic completion
ctx_3 = AKSPreviewContext(
self.cmd,
{
"vm_set_type": None,
"kubernetes_version": "",
"enable_vmss": True,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid vm_set_type when enable_vmss is specified
self.assertEqual(ctx_3.get_vm_set_type(), "VirtualMachineScaleSets")
def test_get_zones(self):
    """get_zones: None by default, read back from an attached mc, raw passthrough."""
    # Case 1: no zones supplied -> None, until an mc carrying zones is attached.
    default_ctx = AKSPreviewContext(
        self.cmd,
        {"node_zones": None},
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    self.assertEqual(default_ctx.get_zones(), None)
    pool = self.models.ManagedClusterAgentPoolProfile(
        name="test_nodepool_name",
        availability_zones=["test_mc_zones1", "test_mc_zones2"],
    )
    cluster = self.models.ManagedCluster(
        location="test_location", agent_pool_profiles=[pool]
    )
    default_ctx.attach_mc(cluster)
    self.assertEqual(
        default_ctx.get_zones(), ["test_mc_zones1", "test_mc_zones2"]
    )

    # Case 2: explicitly supplied zones are returned unchanged.
    custom_ctx = AKSPreviewContext(
        self.cmd,
        {"node_zones": ["test_zones1", "test_zones2"]},
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    self.assertEqual(custom_ctx.get_zones(), ["test_zones1", "test_zones2"])
def test_get_pod_subnet_id(self):
    """get_pod_subnet_id: None by default, then sourced from the attached mc."""
    ctx = AKSPreviewContext(
        self.cmd,
        {"pod_subnet_id": None},
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    self.assertEqual(ctx.get_pod_subnet_id(), None)

    # Attach an mc whose agent pool carries a pod subnet id; the context
    # should now surface that value.
    pool = self.models.ManagedClusterAgentPoolProfile(
        name="test_nodepool_name", pod_subnet_id="test_mc_pod_subnet_id"
    )
    cluster = self.models.ManagedCluster(
        location="test_location", agent_pool_profiles=[pool]
    )
    ctx.attach_mc(cluster)
    self.assertEqual(ctx.get_pod_subnet_id(), "test_mc_pod_subnet_id")
def test_get_enable_fips_image(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"enable_fips_image": False},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_fips_image(), False)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name",
enable_fips=True,
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_fips_image(), True)
def test_get_workload_runtime(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"workload_runtime": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_workload_runtime(), None)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name",
workload_runtime="test_mc_workload_runtime",
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_workload_runtime(), "test_mc_workload_runtime"
)
def test_get_gpu_instance_profile(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"gpu_instance_profile": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_gpu_instance_profile(), None)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name",
gpu_instance_profile="test_mc_gpu_instance_profile",
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_gpu_instance_profile(), "test_mc_gpu_instance_profile"
)
def test_get_kubelet_config(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"kubelet_config": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_kubelet_config(), None)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name",
kubelet_config=self.models.KubeletConfig(pod_max_pids=100),
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_kubelet_config(),
self.models.KubeletConfig(pod_max_pids=100),
)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"kubelet_config": "fake-path"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_kubelet_config()
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{"kubelet_config": _get_test_data_file("invalidconfig.json")},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_kubelet_config()
def test_get_linux_os_config(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"linux_os_config": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_linux_os_config(), None)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name",
linux_os_config=self.models.LinuxOSConfig(swap_file_size_mb=200),
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_linux_os_config(),
self.models.LinuxOSConfig(swap_file_size_mb=200),
)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"linux_os_config": "fake-path"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_linux_os_config()
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{"linux_os_config": _get_test_data_file("invalidconfig.json")},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_linux_os_config()
def test_get_http_proxy_config(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"http_proxy_config": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_http_proxy_config(), None)
mc = self.models.ManagedCluster(
location="test_location",
http_proxy_config=self.models.ManagedClusterHTTPProxyConfig(
http_proxy="test_http_proxy"
),
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_http_proxy_config(),
self.models.ManagedClusterHTTPProxyConfig(
http_proxy="test_http_proxy"
),
)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"http_proxy_config": "fake-path"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_http_proxy_config()
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{"http_proxy_config": _get_test_data_file("invalidconfig.json")},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
# fail on invalid file path
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_http_proxy_config()
def test_get_node_resource_group(self):
    """get_node_resource_group: None by default, read back from an attached mc."""
    ctx = AKSPreviewContext(
        self.cmd,
        {"node_resource_group": None},
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    self.assertEqual(ctx.get_node_resource_group(), None)

    # Once an mc with the field set is attached, the context reads it back.
    cluster = self.models.ManagedCluster(
        location="test_location",
        node_resource_group="test_node_resource_group",
    )
    ctx.attach_mc(cluster)
    self.assertEqual(
        ctx.get_node_resource_group(), "test_node_resource_group"
    )
def test_get_nat_gateway_managed_outbound_ip_count(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"nat_gateway_managed_outbound_ip_count": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(
ctx_1.get_nat_gateway_managed_outbound_ip_count(), None
)
nat_gateway_profile = self.models.nat_gateway_models.get(
"ManagedClusterNATGatewayProfile"
)(
managed_outbound_ip_profile=self.models.nat_gateway_models.get(
"ManagedClusterManagedOutboundIPProfile"
)(count=10)
)
network_profile = self.models.ContainerServiceNetworkProfile(
nat_gateway_profile=nat_gateway_profile
)
mc = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_nat_gateway_managed_outbound_ip_count(), 10)
def test_get_nat_gateway_idle_timeout(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"nat_gateway_idle_timeout": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), None)
nat_gateway_profile = self.models.nat_gateway_models.get(
"ManagedClusterNATGatewayProfile"
)(
idle_timeout_in_minutes=20,
)
network_profile = self.models.ContainerServiceNetworkProfile(
nat_gateway_profile=nat_gateway_profile
)
mc = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), 20)
def test_get_enable_pod_security_policy(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"enable_pod_security_policy": False},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_pod_security_policy(), False)
mc = self.models.ManagedCluster(
location="test_location",
enable_pod_security_policy=True,
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_pod_security_policy(), True)
def test_get_enable_managed_identity(self):
# custom value
ctx_1 = AKSPreviewContext(
self.cmd,
{"enable_managed_identity": False, "enable_pod_identity": True},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(RequiredArgumentMissingError):
self.assertEqual(ctx_1.get_enable_managed_identity(), False)
def test_get_enable_pod_identity(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"enable_pod_identity": False},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_pod_identity(), False)
pod_identity_profile = self.models.ManagedClusterPodIdentityProfile(
enabled=True
)
mc = self.models.ManagedCluster(
location="test_location",
pod_identity_profile=pod_identity_profile,
)
ctx_1.attach_mc(mc)
# fail on enable_managed_identity not specified
with self.assertRaises(RequiredArgumentMissingError):
self.assertEqual(ctx_1.get_enable_pod_identity(), True)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{
"enable_managed_identity": True,
"enable_pod_identity": True,
"enable_pod_identity_with_kubenet": False,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
network_profile_2 = self.models.ContainerServiceNetworkProfile(
network_plugin="kubenet"
)
mc_2 = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile_2,
)
ctx_2.attach_mc(mc_2)
# fail on enable_pod_identity_with_kubenet not specified
with self.assertRaises(RequiredArgumentMissingError):
self.assertEqual(ctx_2.get_enable_pod_identity(), True)
def test_get_enable_pod_identity_with_kubenet(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"enable_pod_identity_with_kubenet": False},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), False)
pod_identity_profile = self.models.ManagedClusterPodIdentityProfile(
enabled=True,
allow_network_plugin_kubenet=True,
)
mc = self.models.ManagedCluster(
location="test_location",
pod_identity_profile=pod_identity_profile,
)
ctx_1.attach_mc(mc)
# fail on enable_managed_identity not specified
# with self.assertRaises(RequiredArgumentMissingError):
self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), True)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{
"enable_managed_identity": True,
"enable_pod_identity": True,
"enable_pod_identity_with_kubenet": False,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
network_profile_2 = self.models.ContainerServiceNetworkProfile(
network_plugin="kubenet"
)
mc_2 = self.models.ManagedCluster(
location="test_location",
network_profile=network_profile_2,
)
ctx_2.attach_mc(mc_2)
# fail on enable_pod_identity_with_kubenet not specified
with self.assertRaises(RequiredArgumentMissingError):
self.assertEqual(
ctx_2.get_enable_pod_identity_with_kubenet(), False
)
def test_get_addon_consts(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
addon_consts = ctx_1.get_addon_consts()
ground_truth_addon_consts = {
"ADDONS": ADDONS,
"CONST_ACC_SGX_QUOTE_HELPER_ENABLED": CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
"CONST_AZURE_POLICY_ADDON_NAME": CONST_AZURE_POLICY_ADDON_NAME,
"CONST_CONFCOM_ADDON_NAME": CONST_CONFCOM_ADDON_NAME,
"CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME": CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
"CONST_INGRESS_APPGW_ADDON_NAME": CONST_INGRESS_APPGW_ADDON_NAME,
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
"CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
"CONST_INGRESS_APPGW_SUBNET_CIDR": CONST_INGRESS_APPGW_SUBNET_CIDR,
"CONST_INGRESS_APPGW_SUBNET_ID": CONST_INGRESS_APPGW_SUBNET_ID,
"CONST_INGRESS_APPGW_WATCH_NAMESPACE": CONST_INGRESS_APPGW_WATCH_NAMESPACE,
"CONST_KUBE_DASHBOARD_ADDON_NAME": CONST_KUBE_DASHBOARD_ADDON_NAME,
"CONST_MONITORING_ADDON_NAME": CONST_MONITORING_ADDON_NAME,
"CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID": CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
"CONST_OPEN_SERVICE_MESH_ADDON_NAME": CONST_OPEN_SERVICE_MESH_ADDON_NAME,
"CONST_VIRTUAL_NODE_ADDON_NAME": CONST_VIRTUAL_NODE_ADDON_NAME,
"CONST_VIRTUAL_NODE_SUBNET_NAME": CONST_VIRTUAL_NODE_SUBNET_NAME,
"CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME": CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
"CONST_SECRET_ROTATION_ENABLED": CONST_SECRET_ROTATION_ENABLED,
"CONST_ROTATION_POLL_INTERVAL": CONST_ROTATION_POLL_INTERVAL,
# new addon consts in aks-preview
"CONST_GITOPS_ADDON_NAME": CONST_GITOPS_ADDON_NAME,
"CONST_MONITORING_USING_AAD_MSI_AUTH": CONST_MONITORING_USING_AAD_MSI_AUTH,
}
self.assertEqual(addon_consts, ground_truth_addon_consts)
def test_get_appgw_subnet_prefix(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{
"appgw_subnet_prefix": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_appgw_subnet_prefix(), None)
addon_profiles_1 = {
CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={
CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix"
},
)
}
mc = self.models.ManagedCluster(
location="test_location", addon_profiles=addon_profiles_1
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_appgw_subnet_prefix(), "test_appgw_subnet_prefix"
)
def test_get_enable_msi_auth_for_monitoring(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{
"enable_msi_auth_for_monitoring": False,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), False)
addon_profiles_1 = {
CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
enabled=True,
config={CONST_MONITORING_USING_AAD_MSI_AUTH: True},
)
}
mc = self.models.ManagedCluster(
location="test_location", addon_profiles=addon_profiles_1
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), True)
def test_get_no_wait(self):
# custom value
ctx_1 = AKSPreviewContext(
self.cmd,
{
"no_wait": True,
"enable_msi_auth_for_monitoring": True,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
ctx_1.set_intermediate("monitoring", True, overwrite_exists=True)
self.assertEqual(ctx_1.get_no_wait(), False)
def test_validate_gmsa_options(self):
    """Exercise the private __validate_gmsa_options guard clauses.

    Arguments are (enable_windows_gmsa, gmsa_dns_server,
    gmsa_root_domain_name, yes).
    """
    # default
    ctx = AKSPreviewContext(
        self.cmd,
        {},
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # valid combinations: fully disabled, or enabled with --yes
    ctx._AKSPreviewContext__validate_gmsa_options(False, None, None, False)
    ctx._AKSPreviewContext__validate_gmsa_options(True, None, None, True)
    # fail on yes & prompt_y_n not specified
    # (user answers "no" at the confirmation prompt -> early exit)
    with patch(
        "azext_aks_preview.decorator.prompt_y_n",
        return_value=False,
    ), self.assertRaises(DecoratorEarlyExitException):
        ctx._AKSPreviewContext__validate_gmsa_options(
            True, None, None, False
        )
    # fail on gmsa_root_domain_name not specified
    with self.assertRaises(RequiredArgumentMissingError):
        ctx._AKSPreviewContext__validate_gmsa_options(
            True, "test_gmsa_dns_server", None, False
        )
    # fail on enable_windows_gmsa not specified
    with self.assertRaises(RequiredArgumentMissingError):
        ctx._AKSPreviewContext__validate_gmsa_options(
            False, None, "test_gmsa_root_domain_name", False
        )
def test_get_enable_windows_gmsa(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{
"enable_windows_gmsa": False,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_enable_windows_gmsa(), False)
windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(enabled=True)
windows_profile_1 = self.models.ManagedClusterWindowsProfile(
admin_username="test_admin_username",
gmsa_profile=windows_gmsa_profile_1,
)
mc = self.models.ManagedCluster(
location="test_location", windows_profile=windows_profile_1
)
ctx_1.attach_mc(mc)
with patch(
"azext_aks_preview.decorator.prompt_y_n",
return_value=True,
):
self.assertEqual(ctx_1.get_enable_windows_gmsa(), True)
def test_get_gmsa_dns_server_and_root_domain_name(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{
"enable_windows_gmsa": False,
"gmsa_dns_server": None,
"gmsa_root_domain_name": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(
ctx_1.get_gmsa_dns_server_and_root_domain_name(), (None, None)
)
windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(
enabled=True,
dns_server="test_dns_server",
root_domain_name="test_root_domain_name",
)
windows_profile_1 = self.models.ManagedClusterWindowsProfile(
admin_username="test_admin_username",
gmsa_profile=windows_gmsa_profile_1,
)
mc = self.models.ManagedCluster(
location="test_location", windows_profile=windows_profile_1
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_gmsa_dns_server_and_root_domain_name(),
("test_dns_server", "test_root_domain_name"),
)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{
"enable_windows_gmsa": True,
"gmsa_dns_server": "test_gmsa_dns_server",
"gmsa_root_domain_name": "test_gmsa_root_domain_name",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
windows_gmsa_profile_2 = self.models.WindowsGmsaProfile(
enabled=True,
dns_server="test_dns_server",
root_domain_name=None,
)
windows_profile_2 = self.models.ManagedClusterWindowsProfile(
admin_username="test_admin_username",
gmsa_profile=windows_gmsa_profile_2,
)
mc = self.models.ManagedCluster(
location="test_location", windows_profile=windows_profile_2
)
ctx_2.attach_mc(mc)
# fail on inconsistent state
with self.assertRaises(CLIInternalError):
ctx_2.get_gmsa_dns_server_and_root_domain_name()
def test_get_snapshot_id(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{
"snapshot_id": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_snapshot_id(), None)
creation_data = self.models.CreationData(
source_resource_id="test_source_resource_id"
)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name", creation_data=creation_data
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_snapshot_id(), "test_source_resource_id")
def test_get_snapshot(self):
# custom value
ctx_1 = AKSPreviewContext(
self.cmd,
{
"snapshot_id": "test_source_resource_id",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock()
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
# test cache
self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
def test_get_kubernetes_version(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"kubernetes_version": ""},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_kubernetes_version(), "")
mc = self.models.ManagedCluster(
location="test_location",
kubernetes_version="test_mc_kubernetes_version",
)
ctx_1.attach_mc(mc)
self.assertEqual(
ctx_1.get_kubernetes_version(), "test_mc_kubernetes_version"
)
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"kubernetes_version": "", "snapshot_id": "test_snapshot_id"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(
ctx_2.get_kubernetes_version(), "test_kubernetes_version"
)
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{
"kubernetes_version": "custom_kubernetes_version",
"snapshot_id": "test_snapshot_id",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(
ctx_3.get_kubernetes_version(), "custom_kubernetes_version"
)
def test_get_os_sku(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"os_sku": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_os_sku(), None)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name", os_sku="test_mc_os_sku"
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_os_sku(), "test_mc_os_sku")
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"os_sku": None, "snapshot_id": "test_snapshot_id"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(os_sku="test_os_sku")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_os_sku(), "test_os_sku")
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{
"os_sku": "custom_os_sku",
"snapshot_id": "test_snapshot_id",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(os_sku="test_os_sku")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_os_sku(), "custom_os_sku")
def test_get_node_vm_size(self):
# default
ctx_1 = AKSPreviewContext(
self.cmd,
{"node_vm_size": None},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_node_vm_size(), "Standard_DS2_v2")
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name", vm_size="Standard_ABCD_v2"
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_node_vm_size(), "Standard_ABCD_v2")
# custom value
ctx_2 = AKSPreviewContext(
self.cmd,
{"node_vm_size": None, "snapshot_id": "test_snapshot_id"},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(vm_size="test_vm_size")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(ctx_2.get_node_vm_size(), "test_vm_size")
# custom value
ctx_3 = AKSPreviewContext(
self.cmd,
{
"node_vm_size": "custom_node_vm_size",
"snapshot_id": "test_snapshot_id",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
mock_snapshot = Mock(vm_size="test_vm_size")
with patch(
"azext_aks_preview.decorator._get_snapshot",
return_value=mock_snapshot,
):
self.assertEqual(ctx_3.get_node_vm_size(), "custom_node_vm_size")
def test_get_outbound_type(self):
    """Validate outbound type defaulting, mc passthrough and compatibility checks.

    NOTE(review): renamed from the accidental ``test_test_get_outbound_type``
    (doubled ``test_`` prefix); unittest discovery via the ``test_*`` pattern
    is unaffected.
    """
    # default: unset -> "loadBalancer"; attached mc value wins afterwards
    ctx_1 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": None,
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    self.assertEqual(ctx_1._get_outbound_type(read_only=True), None)
    self.assertEqual(ctx_1.get_outbound_type(), "loadBalancer")
    network_profile_1 = self.models.ContainerServiceNetworkProfile(
        outbound_type="test_outbound_type"
    )
    mc = self.models.ManagedCluster(
        location="test_location", network_profile=network_profile_1
    )
    ctx_1.attach_mc(mc)
    self.assertEqual(ctx_1.get_outbound_type(), "test_outbound_type")

    # invalid parameter
    ctx_2 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
            "load_balancer_sku": "basic",
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # fail on invalid load_balancer_sku (basic) when outbound_type is CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY
    with self.assertRaises(InvalidArgumentValueError):
        ctx_2.get_outbound_type()

    # invalid parameter
    ctx_3 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
            "load_balancer_sku": "basic",
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # fail on invalid load_balancer_sku (basic) when outbound_type is CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY
    with self.assertRaises(InvalidArgumentValueError):
        ctx_3.get_outbound_type()

    # invalid parameter
    ctx_4 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
            "vnet_subnet_id": None,
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # fail on vnet_subnet_id not specified
    with self.assertRaises(RequiredArgumentMissingError):
        ctx_4.get_outbound_type()

    # invalid parameter
    ctx_5 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
            "vnet_subnet_id": None,
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # fail on vnet_subnet_id not specified
    with self.assertRaises(RequiredArgumentMissingError):
        ctx_5.get_outbound_type()

    # invalid parameter
    ctx_6 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
            "vnet_subnet_id": "test_vnet_subnet_id",
            "load_balancer_managed_outbound_ip_count": 10,
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of
    # load balancer
    with self.assertRaises(MutuallyExclusiveArgumentError):
        ctx_6.get_outbound_type()

    # invalid parameter
    ctx_7 = AKSPreviewContext(
        self.cmd,
        {
            "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
            "vnet_subnet_id": "test_vnet_subnet_id",
        },
        self.models,
        decorator_mode=DecoratorMode.CREATE,
    )
    # a load balancer profile carrying outbound IP prefixes conflicts with UDR
    load_balancer_profile = self.models.lb_models.get(
        "ManagedClusterLoadBalancerProfile"
    )(
        outbound_ip_prefixes=self.models.lb_models.get(
            "ManagedClusterLoadBalancerProfileOutboundIPPrefixes"
        )(
            public_ip_prefixes=[
                self.models.lb_models.get("ResourceReference")(
                    id="test_public_ip_prefix"
                )
            ]
        )
    )
    # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of
    # load balancer
    with self.assertRaises(MutuallyExclusiveArgumentError):
        ctx_7.get_outbound_type(
            load_balancer_profile=load_balancer_profile,
        )
class AKSPreviewCreateDecoratorTestCase(unittest.TestCase):
def setUp(self):
# manually register CUSTOM_MGMT_AKS_PREVIEW
register_aks_preview_resource_type()
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
self.client = MockClient()
    def test_set_up_agent_pool_profiles(self):
        """set_up_agent_pool_profiles: default raw params produce the stock system pool;
        fully customized params (incl. snapshot, kubelet/linux OS config files) are all
        reflected in the resulting agent pool profile.
        """
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "nodepool_name": "nodepool1",
                "nodepool_tags": None,
                "nodepool_labels": None,
                "node_count": 3,
                "node_vm_size": "Standard_DS2_v2",
                "os_sku": None,
                "vnet_subnet_id": None,
                "pod_subnet_id": None,
                "ppg": None,
                "zones": None,
                "enable_node_public_ip": False,
                "enable_fips_image": False,
                "node_public_ip_prefix_id": None,
                "enable_encryption_at_host": False,
                "enable_ultra_ssd": False,
                "max_pods": 0,
                "node_osdisk_size": 0,
                "node_osdisk_type": None,
                "enable_cluster_autoscaler": False,
                "min_count": None,
                "max_count": None,
                "workload_runtime": None,
                "gpu_instance_profile": None,
                "kubelet_config": None,
                "snapshot_id": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_agent_pool_profiles(None)
        dec_mc_1 = dec_1.set_up_agent_pool_profiles(mc_1)
        agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name="nodepool1",
            tags=None,
            node_labels=None,
            count=3,
            vm_size="Standard_DS2_v2",
            os_type="Linux",
            os_sku=None,
            vnet_subnet_id=None,
            pod_subnet_id=None,
            proximity_placement_group_id=None,
            availability_zones=None,
            enable_node_public_ip=False,
            enable_fips=False,
            node_public_ip_prefix_id=None,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            max_pods=None,
            type="VirtualMachineScaleSets",
            mode="System",
            os_disk_size_gb=None,
            os_disk_type=None,
            enable_auto_scaling=False,
            min_count=None,
            max_count=None,
            workload_runtime=None,
            gpu_instance_profile=None,
            kubelet_config=None,
            creation_data=None,
        )
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        ground_truth_mc_1.agent_pool_profiles = [agent_pool_profile_1]
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "nodepool_name": "test_np_name1234",
                "nodepool_tags": {"k1": "v1"},
                "nodepool_labels": {"k1": "v1", "k2": "v2"},
                "node_count": 10,
                "node_vm_size": "Standard_DSx_vy",
                "os_sku": None,
                "vnet_subnet_id": "test_vnet_subnet_id",
                "pod_subnet_id": "test_pod_subnet_id",
                "ppg": "test_ppg_id",
                "zones": ["tz1", "tz2"],
                "enable_node_public_ip": True,
                "enable_fips_image": True,
                "node_public_ip_prefix_id": "test_node_public_ip_prefix_id",
                "enable_encryption_at_host": True,
                "enable_ultra_ssd": True,
                "max_pods": 50,
                "node_osdisk_size": 100,
                "node_osdisk_type": "test_os_disk_type",
                "enable_cluster_autoscaler": True,
                "min_count": 5,
                "max_count": 20,
                "workload_runtime": "test_workload_runtime",
                "gpu_instance_profile": "test_gpu_instance_profile",
                "kubelet_config": _get_test_data_file("kubeletconfig.json"),
                "linux_os_config": _get_test_data_file("linuxosconfig.json"),
                "snapshot_id": "test_snapshot_id",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        # os_sku/vm_size come from the snapshot when snapshot_id is given
        mock_snapshot = Mock(
            kubernetes_version="",
            os_sku="snapshot_os_sku",
            vm_size="snapshot_vm_size",
        )
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            dec_mc_2 = dec_2.set_up_agent_pool_profiles(mc_2)
        agent_pool_profile_2 = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name="test_np_name",
            tags={"k1": "v1"},
            node_labels={"k1": "v1", "k2": "v2"},
            count=10,
            vm_size="Standard_DSx_vy",
            os_type="Linux",
            os_sku="snapshot_os_sku",
            vnet_subnet_id="test_vnet_subnet_id",
            pod_subnet_id="test_pod_subnet_id",
            proximity_placement_group_id="test_ppg_id",
            availability_zones=["tz1", "tz2"],
            enable_node_public_ip=True,
            enable_fips=True,
            node_public_ip_prefix_id="test_node_public_ip_prefix_id",
            enable_encryption_at_host=True,
            enable_ultra_ssd=True,
            max_pods=50,
            type="VirtualMachineScaleSets",
            mode="System",
            os_disk_size_gb=100,
            os_disk_type="test_os_disk_type",
            enable_auto_scaling=True,
            min_count=5,
            max_count=20,
            workload_runtime="test_workload_runtime",
            gpu_instance_profile="test_gpu_instance_profile",
            kubelet_config={
                "cpuManagerPolicy": "static",
                "cpuCfsQuota": True,
                "cpuCfsQuotaPeriod": "200ms",
                "imageGcHighThreshold": 90,
                "imageGcLowThreshold": 70,
                "topologyManagerPolicy": "best-effort",
                "allowedUnsafeSysctls": ["kernel.msg*", "net.*"],
                "failSwapOn": False,
                "containerLogMaxFiles": 10,
                "podMaxPids": 120,
                "containerLogMaxSizeMB": 20,
            },
            linux_os_config={
                "transparentHugePageEnabled": "madvise",
                "transparentHugePageDefrag": "defer+madvise",
                "swapFileSizeMB": 1500,
                "sysctls": {
                    "netCoreSomaxconn": 163849,
                    "netIpv4TcpTwReuse": True,
                    "netIpv4IpLocalPortRange": "32000 60000",
                },
            },
            creation_data=self.models.CreationData(
                source_resource_id="test_snapshot_id"
            ),
        )
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location")
        ground_truth_mc_2.agent_pool_profiles = [agent_pool_profile_2]
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_http_proxy_config(self):
        """set_up_http_proxy_config: None leaves mc untouched; a JSON file path is
        loaded into the mc's http_proxy_config."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "http_proxy_config": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_http_proxy_config(None)
        dec_mc_1 = dec_1.set_up_http_proxy_config(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {"http_proxy_config": _get_test_data_file("httpproxyconfig.json")},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_http_proxy_config(mc_2)
        # expected values mirror the contents of httpproxyconfig.json
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            http_proxy_config={
                "httpProxy": "http://myproxy.server.com:8080/",
                "httpsProxy": "https://myproxy.server.com:8080/",
                "noProxy": ["localhost", "127.0.0.1"],
            },
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
def test_set_up_node_resource_group(self):
# default value in `aks_create`
dec_1 = AKSPreviewCreateDecorator(
self.cmd,
self.client,
{
"node_resource_group": None,
},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_1 = self.models.ManagedCluster(location="test_location")
# fail on passing the wrong mc object
with self.assertRaises(CLIInternalError):
dec_1.set_up_node_resource_group(None)
dec_mc_1 = dec_1.set_up_node_resource_group(mc_1)
ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
self.assertEqual(dec_mc_1, ground_truth_mc_1)
# custom value
dec_2 = AKSPreviewCreateDecorator(
self.cmd,
self.client,
{"node_resource_group": "test_node_resource_group"},
CUSTOM_MGMT_AKS_PREVIEW,
)
mc_2 = self.models.ManagedCluster(location="test_location")
dec_mc_2 = dec_2.set_up_node_resource_group(mc_2)
ground_truth_mc_2 = self.models.ManagedCluster(
location="test_location",
node_resource_group="test_node_resource_group",
)
self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_network_profile(self):
        """set_up_network_profile: all-None raw params yield the SDK defaults;
        custom kubenet + NAT gateway params are reflected in the network profile."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": None,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": None,
                "pod_cidr": None,
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": None,
                "nat_gateway_idle_timeout": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_network_profile(None)
        dec_mc_1 = dec_1.set_up_network_profile(mc_1)
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",  # default value in SDK
            pod_cidr="10.244.0.0/16",  # default value in SDK
            service_cidr="10.0.0.0/16",  # default value in SDK
            dns_service_ip="10.0.0.10",  # default value in SDK
            docker_bridge_cidr="172.17.0.1/16",  # default value in SDK
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
        )
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_1
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": None,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": "kubenet",
                "pod_cidr": "10.246.0.0/16",
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": 10,
                "nat_gateway_idle_timeout": 20,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_network_profile(mc_2)
        # NAT gateway models are resolved dynamically from the versioned SDK
        nat_gateway_profile_2 = self.models.nat_gateway_models.get(
            "ManagedClusterNATGatewayProfile"
        )(
            managed_outbound_ip_profile=self.models.nat_gateway_models.get(
                "ManagedClusterManagedOutboundIPProfile"
            )(count=10),
            idle_timeout_in_minutes=20,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",
            pod_cidr="10.246.0.0/16",
            service_cidr=None,  # overwritten to None
            dns_service_ip=None,  # overwritten to None
            docker_bridge_cidr=None,  # overwritten to None
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
            nat_gateway_profile=nat_gateway_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_pod_security_policy(self):
        """set_up_pod_security_policy: the enable_pod_security_policy flag is copied
        onto the managed cluster for both the default (False) and custom (True) cases."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_pod_security_policy": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_pod_security_policy(None)
        dec_mc_1 = dec_1.set_up_pod_security_policy(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", enable_pod_security_policy=False
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {"enable_pod_security_policy": True},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_pod_security_policy(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            enable_pod_security_policy=True,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_pod_identity_profile(self):
        """set_up_pod_identity_profile: disabled flags leave mc untouched; enabling
        pod identity with kubenet sets a profile with allow_network_plugin_kubenet."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_pod_identity": False,
                "enable_pod_identity_with_kubenet": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_pod_identity_profile(None)
        dec_mc_1 = dec_1.set_up_pod_identity_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": True,
                "enable_pod_identity": True,
                "enable_pod_identity_with_kubenet": True,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        # the input mc must already carry a kubenet network profile
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_2
        )
        dec_mc_2 = dec_2.set_up_pod_identity_profile(mc_2)
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        pod_identity_profile_2 = self.models.ManagedClusterPodIdentityProfile(
            enabled=True,
            allow_network_plugin_kubenet=True,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile_2,
            pod_identity_profile=pod_identity_profile_2,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_build_monitoring_addon_profile(self):
        """build_monitoring_addon_profile: builds the enabled addon profile with the
        workspace id and MSI-auth flag, and records the 'monitoring' intermediate."""
        # default
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "location": "test_location",
                "enable_addons": "monitoring",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_1.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        # patch out the Log Analytics onboarding side effect
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            self.assertEqual(dec_1.context.get_intermediate("monitoring"), None)
            monitoring_addon_profile = dec_1.build_monitoring_addon_profile()
            ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: False,
                },
            )
            self.assertEqual(
                monitoring_addon_profile, ground_truth_monitoring_addon_profile
            )
            self.assertEqual(dec_1.context.get_intermediate("monitoring"), True)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "location": "test_location",
                "enable_addons": "monitoring",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": True,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_2.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            self.assertEqual(dec_2.context.get_intermediate("monitoring"), None)
            monitoring_addon_profile = dec_2.build_monitoring_addon_profile()
            ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                },
            )
            self.assertEqual(
                monitoring_addon_profile, ground_truth_monitoring_addon_profile
            )
            self.assertEqual(dec_2.context.get_intermediate("monitoring"), True)
    def test_build_ingress_appgw_addon_profile(self):
        """build_ingress_appgw_addon_profile: maps appgw raw params into addon config,
        sets the 'ingress_appgw_addon_enabled' intermediate, and prefers
        appgw_subnet_cidr over the deprecated appgw_subnet_prefix."""
        # default
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_1.build_ingress_appgw_addon_profile()
        ground_truth_ingress_appgw_addon_profile = (
            self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={},
            )
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_2.build_ingress_appgw_addon_profile()
        # with no appgw_subnet_cidr, the prefix value fills the subnet CIDR key
        ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
            enabled=True,
            config={
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix",
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
            },
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
        # custom value
        dec_3 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_subnet_cidr": "test_appgw_subnet_cidr",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_3.build_ingress_appgw_addon_profile()
        # when both are given, appgw_subnet_cidr wins over appgw_subnet_prefix
        ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
            enabled=True,
            config={
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr",
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
            },
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
def test_build_gitops_addon_profile(self):
# default
dec_1 = AKSPreviewCreateDecorator(
self.cmd,
self.client,
{},
CUSTOM_MGMT_AKS_PREVIEW,
)
gitops_addon_profile = dec_1.build_gitops_addon_profile()
ground_truth_gitops_addon_profile = (
self.models.ManagedClusterAddonProfile(
enabled=True,
)
)
self.assertEqual(
gitops_addon_profile, ground_truth_gitops_addon_profile
)
    def test_set_up_addon_profiles(self):
        """set_up_addon_profiles: no addons yields an empty profile dict and no
        intermediates; 'monitoring,ingress-appgw,gitops' builds all three profiles
        and sets the corresponding intermediates."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_addons": None,
                "workspace_resource_id": None,
                "aci_subnet_name": None,
                "appgw_name": None,
                "appgw_subnet_cidr": None,
                "appgw_id": None,
                "appgw_subnet_id": None,
                "appgw_watch_namespace": None,
                "enable_sgxquotehelper": False,
                "enable_secret_rotation": False,
                "rotation_poll_interval": None,
                "appgw_subnet_prefix": None,
                "enable_msi_auth_for_monitoring": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_addon_profiles(None)
        dec_mc_1 = dec_1.set_up_addon_profiles(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", addon_profiles={}
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        self.assertEqual(dec_1.context.get_intermediate("monitoring"), None)
        self.assertEqual(
            dec_1.context.get_intermediate("enable_virtual_node"), None
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "name": "test_name",
                "resource_group_name": "test_rg_name",
                "location": "test_location",
                "vnet_subnet_id": "test_vnet_subnet_id",
                "enable_addons": "monitoring,ingress-appgw,gitops",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": True,
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_2.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        # patch out the Log Analytics onboarding side effect
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            dec_mc_2 = dec_2.set_up_addon_profiles(mc_2)
        addon_profiles_2 = {
            CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                },
            ),
            CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                    CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                    CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix",
                    CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
                },
            ),
            CONST_GITOPS_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
            ),
        }
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", addon_profiles=addon_profiles_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        self.assertEqual(dec_2.context.get_intermediate("monitoring"), True)
        self.assertEqual(
            dec_2.context.get_intermediate("enable_virtual_node"), None
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
    def test_set_up_windows_profile(self):
        """set_up_windows_profile: no Windows credentials leaves mc untouched;
        custom credentials with AHUB and gMSA produce the full windows profile."""
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": None,
                "windows_admin_password": None,
                "enable_ahub": False,
                "enable_windows_gmsa": False,
                "gmsa_dns_server": None,
                "gmsa_root_domain_name": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # fail on passing the wrong mc object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_windows_profile(None)
        dec_mc_1 = dec_1.set_up_windows_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        # custom value
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
                "windows_admin_username": "test_win_admin_name",
                "windows_admin_password": "test_win_admin_password",
                "enable_ahub": True,
                "enable_windows_gmsa": True,
                "gmsa_dns_server": "test_gmsa_dns_server",
                "gmsa_root_domain_name": "test_gmsa_root_domain_name",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_windows_profile(mc_2)
        windows_gmsa_profile_2 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_gmsa_dns_server",
            root_domain_name="test_gmsa_root_domain_name",
        )
        windows_profile_2 = self.models.ManagedClusterWindowsProfile(
            # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")]
            admin_username="test_win_admin_name",
            admin_password="test_win_admin_password",
            license_type="Windows_Server",
            gmsa_profile=windows_gmsa_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", windows_profile=windows_profile_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_construct_preview_mc_profile(self):
        """End-to-end construction: feed aks_create's default parameter values through
        construct_preview_mc_profile and compare against a hand-built ManagedCluster.
        Also pins aks_create's positional parameter list."""
        import inspect
        import paramiko
        from azext_aks_preview.custom import aks_create
        # split aks_create's signature into positional vs defaulted parameters
        optional_params = {}
        positional_params = []
        for _, v in inspect.signature(aks_create).parameters.items():
            if v.default != v.empty:
                optional_params[v.name] = v.default
            else:
                positional_params.append(v.name)
        ground_truth_positional_params = [
            "cmd",
            "client",
            "resource_group_name",
            "name",
            "ssh_key_value",
        ]
        self.assertEqual(positional_params, ground_truth_positional_params)
        # prepare ssh key
        key = paramiko.RSAKey.generate(2048)
        public_key = "{} {}".format(key.get_name(), key.get_base64())
        # prepare a dictionary of default parameters
        raw_param_dict = {
            "resource_group_name": "test_rg_name",
            "name": "test_name",
            "ssh_key_value": public_key,
        }
        raw_param_dict.update(optional_params)
        from azure.cli.command_modules.acs.decorator import AKSParamDict
        raw_param_dict = AKSParamDict(raw_param_dict)
        # default value in `aks_create`
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd, self.client, raw_param_dict, CUSTOM_MGMT_AKS_PREVIEW
        )
        mock_profile = Mock(
            get_subscription_id=Mock(return_value="1234-5678-9012")
        )
        # patch out resource-group lookup and the Azure CLI Profile
        with patch(
            "azure.cli.command_modules.acs.decorator.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs.decorator.Profile",
            return_value=mock_profile,
        ):
            dec_mc_1 = dec_1.construct_preview_mc_profile()
        agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name="nodepool1",
            # tags=None,
            # node_labels=None,
            count=3,
            vm_size="Standard_DS2_v2",
            os_type="Linux",
            enable_node_public_ip=False,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            type="VirtualMachineScaleSets",
            mode="System",
            enable_auto_scaling=False,
            enable_fips=False,
        )
        ssh_config_1 = self.models.ContainerServiceSshConfiguration(
            public_keys=[
                self.models.ContainerServiceSshPublicKey(key_data=public_key)
            ]
        )
        linux_profile_1 = self.models.ContainerServiceLinuxProfile(
            admin_username="azureuser", ssh=ssh_config_1
        )
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            load_balancer_sku="standard",
        )
        identity_1 = self.models.ManagedClusterIdentity(type="SystemAssigned")
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            dns_prefix="testname-testrgname-1234-5",
            kubernetes_version="",
            addon_profiles={},
            enable_rbac=True,
            agent_pool_profiles=[agent_pool_profile_1],
            linux_profile=linux_profile_1,
            network_profile=network_profile_1,
            identity=identity_1,
            disable_local_accounts=False,
            enable_pod_security_policy=False,
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        raw_param_dict.print_usage_statistics()
    def test_create_mc(self):
        """create_mc: when monitoring onboarding raises a CloudError mentioning the
        AD tenant, create_mc propagates it; the onboarding helper must be called with
        the expected arguments (AAD route, DCRA creation)."""
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            addon_profiles={
                CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                    enabled=True,
                    config={
                        CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                    },
                )
            },
        )
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "enable_managed_identity": True,
                "no_wait": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate(
            "monitoring", True, overwrite_exists=True
        )
        dec_1.context.set_intermediate(
            "subscription_id", "test_subscription_id", overwrite_exists=True
        )
        # build a CloudError whose message matches the retry condition in create_mc
        resp = requests.Response()
        resp.status_code = 500
        err = CloudError(resp)
        err.message = "not found in Active Directory tenant"
        # fail on mock CloudError; time.sleep is patched so retries are instant
        with self.assertRaises(CloudError), patch("time.sleep"), patch(
            "azure.cli.command_modules.acs.decorator.AKSCreateDecorator.create_mc"
        ), patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            side_effect=err,
        ) as ensure_monitoring:
            dec_1.create_mc(mc_1)
        ensure_monitoring.assert_called_with(
            self.cmd,
            mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME],
            "test_subscription_id",
            "test_rg_name",
            "test_name",
            "test_location",
            remove_monitoring=False,
            aad_route=True,
            create_dcr=False,
            create_dcra=True,
        )
class AKSPreviewUpdateDecoratorTestCase(unittest.TestCase):
    def setUp(self):
        """Register the preview resource type and build shared mock fixtures."""
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
        self.client = MockClient()
| 37.926281 | 126 | 0.607083 |
import importlib
import unittest
from unittest.mock import Mock, patch
import requests
from azext_aks_preview.__init__ import register_aks_preview_resource_type
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from azext_aks_preview._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_GITOPS_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
CONST_ROTATION_POLL_INTERVAL,
CONST_SECRET_ROTATION_ENABLED,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from azext_aks_preview.decorator import (
AKSPreviewContext,
AKSPreviewCreateDecorator,
AKSPreviewModels,
AKSPreviewUpdateDecorator,
)
from azext_aks_preview.tests.latest.mocks import MockCLI, MockClient, MockCmd
from azext_aks_preview.tests.latest.test_aks_commands import _get_test_data_file
from azure.cli.command_modules.acs._consts import (
DecoratorEarlyExitException,
DecoratorMode,
)
from azure.cli.core.azclierror import (
CLIInternalError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
)
from msrestazure.azure_exceptions import CloudError
class AKSPreviewModelsTestCase(unittest.TestCase):
    """Tests for AKSPreviewModels: model classes must resolve to the classes in the
    versioned vendored SDK module selected by the default API version."""

    def setUp(self):
        """Register the preview resource type and prepare a mock cmd."""
        # registration must precede model resolution from the SDK profile
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)

    def test_models(self):
        """Each model exposed by AKSPreviewModels equals the class of the same name
        in the vendored SDK models module for the profile's default API version."""
        models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
        from azure.cli.core.profiles._shared import AZURE_API_PROFILES
        sdk_profile = AZURE_API_PROFILES["latest"][CUSTOM_MGMT_AKS_PREVIEW]
        api_version = sdk_profile.default_api_version
        # e.g. "2021-09-01" -> ".v2021_09_01.models"
        module_name = "azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.v{}.models".format(
            api_version.replace("-", "_")
        )
        module = importlib.import_module(module_name)
        self.assertEqual(models.KubeletConfig, getattr(module, "KubeletConfig"))
        self.assertEqual(models.LinuxOSConfig, getattr(module, "LinuxOSConfig"))
        self.assertEqual(
            models.ManagedClusterHTTPProxyConfig,
            getattr(module, "ManagedClusterHTTPProxyConfig"),
        )
        self.assertEqual(
            models.ManagedClusterPodIdentityProfile,
            getattr(module, "ManagedClusterPodIdentityProfile"),
        )
        self.assertEqual(
            models.WindowsGmsaProfile, getattr(module, "WindowsGmsaProfile")
        )
        self.assertEqual(models.CreationData, getattr(module, "CreationData"))
        # NAT gateway models are kept in a separate lookup dict
        self.assertEqual(
            models.nat_gateway_models.get("ManagedClusterNATGatewayProfile"),
            getattr(module, "ManagedClusterNATGatewayProfile"),
        )
        self.assertEqual(
            models.nat_gateway_models.get(
                "ManagedClusterManagedOutboundIPProfile"
            ),
            getattr(module, "ManagedClusterManagedOutboundIPProfile"),
        )
class AKSPreviewContextTestCase(unittest.TestCase):
def setUp(self):
register_aks_preview_resource_type()
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
    def test__get_vm_set_type(self):
        """get_vm_set_type: defaults to VirtualMachineScaleSets, reads back a value
        stored on an attached mc, and rejects 'availabilityset' with enable_vmss."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "vm_set_type": None,
                "kubernetes_version": "",
                "enable_vmss": False,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        # read_only returns the raw value (None here) without dynamic completion
        self.assertEqual(ctx_1._get_vm_set_type(read_only=True), None)
        self.assertEqual(ctx_1.get_vm_set_type(), "VirtualMachineScaleSets")
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_ap_name", type="test_mc_vm_set_type"
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        # the value on the attached mc takes precedence over the raw parameter
        self.assertEqual(ctx_1.get_vm_set_type(), "test_mc_vm_set_type")
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {
                "vm_set_type": "availabilityset",
                "kubernetes_version": "",
                "enable_vmss": True,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        # fail on availability set combined with enable_vmss
        with self.assertRaises(InvalidArgumentValueError):
            self.assertEqual(ctx_2.get_vm_set_type(), "AvailabilitySet")
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {
                "vm_set_type": None,
                "kubernetes_version": "",
                "enable_vmss": True,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_3.get_vm_set_type(), "VirtualMachineScaleSets")
    def test_get_zones(self):
        """get_zones: None falls back to availability_zones on the attached mc;
        an explicit node_zones list is returned as-is."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"node_zones": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_zones(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            availability_zones=["test_mc_zones1", "test_mc_zones2"],
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_zones(), ["test_mc_zones1", "test_mc_zones2"]
        )
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"node_zones": ["test_zones1", "test_zones2"]},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_2.get_zones(), ["test_zones1", "test_zones2"])
    def test_get_pod_subnet_id(self):
        """Verify get_pod_subnet_id: defaults to None and falls back to the
        agent pool profile's pod_subnet_id when an mc is attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"pod_subnet_id": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_pod_subnet_id(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name", pod_subnet_id="test_mc_pod_subnet_id"
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_pod_subnet_id(), "test_mc_pod_subnet_id")
    def test_get_enable_fips_image(self):
        """Verify get_enable_fips_image: defaults to the raw False and falls
        back to the agent pool profile's enable_fips when an mc is attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"enable_fips_image": False},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_fips_image(), False)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            enable_fips=True,
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_fips_image(), True)
    def test_get_workload_runtime(self):
        """Verify get_workload_runtime: defaults to None and falls back to the
        agent pool profile's workload_runtime when an mc is attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"workload_runtime": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_workload_runtime(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            workload_runtime="test_mc_workload_runtime",
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_workload_runtime(), "test_mc_workload_runtime"
        )
    def test_get_gpu_instance_profile(self):
        """Verify get_gpu_instance_profile: defaults to None and falls back to
        the agent pool profile's gpu_instance_profile when an mc is attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"gpu_instance_profile": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_gpu_instance_profile(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            gpu_instance_profile="test_mc_gpu_instance_profile",
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_gpu_instance_profile(), "test_mc_gpu_instance_profile"
        )
    def test_get_kubelet_config(self):
        """Verify get_kubelet_config: defaults to None, falls back to the agent
        pool profile's kubelet_config when an mc is attached, and raises
        InvalidArgumentValueError for a non-existent path or invalid JSON."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"kubelet_config": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_kubelet_config(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            kubelet_config=self.models.KubeletConfig(pod_max_pids=100),
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_kubelet_config(),
            self.models.KubeletConfig(pod_max_pids=100),
        )
        # path that does not exist
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"kubelet_config": "fake-path"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_kubelet_config()
        # file exists but is not valid config content
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {"kubelet_config": _get_test_data_file("invalidconfig.json")},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_3.get_kubelet_config()
    def test_get_linux_os_config(self):
        """Verify get_linux_os_config: defaults to None, falls back to the agent
        pool profile's linux_os_config when an mc is attached, and raises
        InvalidArgumentValueError for a non-existent path or invalid JSON."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"linux_os_config": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_linux_os_config(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name",
            linux_os_config=self.models.LinuxOSConfig(swap_file_size_mb=200),
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_linux_os_config(),
            self.models.LinuxOSConfig(swap_file_size_mb=200),
        )
        # path that does not exist
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"linux_os_config": "fake-path"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_linux_os_config()
        # file exists but is not valid config content
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {"linux_os_config": _get_test_data_file("invalidconfig.json")},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_3.get_linux_os_config()
    def test_get_http_proxy_config(self):
        """Verify get_http_proxy_config: defaults to None, falls back to the
        mc-level http_proxy_config when an mc is attached, and raises
        InvalidArgumentValueError for a non-existent path or invalid JSON."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"http_proxy_config": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_http_proxy_config(), None)
        mc = self.models.ManagedCluster(
            location="test_location",
            http_proxy_config=self.models.ManagedClusterHTTPProxyConfig(
                http_proxy="test_http_proxy"
            ),
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_http_proxy_config(),
            self.models.ManagedClusterHTTPProxyConfig(
                http_proxy="test_http_proxy"
            ),
        )
        # path that does not exist
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"http_proxy_config": "fake-path"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_2.get_http_proxy_config()
        # file exists but is not valid config content
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {"http_proxy_config": _get_test_data_file("invalidconfig.json")},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(InvalidArgumentValueError):
            ctx_3.get_http_proxy_config()
    def test_get_node_resource_group(self):
        """Verify get_node_resource_group: defaults to None and falls back to
        the mc-level node_resource_group when an mc is attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"node_resource_group": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_node_resource_group(), None)
        mc = self.models.ManagedCluster(
            location="test_location",
            node_resource_group="test_node_resource_group",
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_node_resource_group(), "test_node_resource_group"
        )
    def test_get_nat_gateway_managed_outbound_ip_count(self):
        """Verify get_nat_gateway_managed_outbound_ip_count: defaults to None
        and falls back to the NAT gateway profile's managed outbound IP count
        stored on the attached mc's network profile."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"nat_gateway_managed_outbound_ip_count": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(
            ctx_1.get_nat_gateway_managed_outbound_ip_count(), None
        )
        nat_gateway_profile = self.models.nat_gateway_models.get(
            "ManagedClusterNATGatewayProfile"
        )(
            managed_outbound_ip_profile=self.models.nat_gateway_models.get(
                "ManagedClusterManagedOutboundIPProfile"
            )(count=10)
        )
        network_profile = self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=nat_gateway_profile
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_nat_gateway_managed_outbound_ip_count(), 10)
    def test_get_nat_gateway_idle_timeout(self):
        """Verify get_nat_gateway_idle_timeout: defaults to None and falls back
        to the NAT gateway profile's idle_timeout_in_minutes on the attached
        mc's network profile."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"nat_gateway_idle_timeout": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), None)
        nat_gateway_profile = self.models.nat_gateway_models.get(
            "ManagedClusterNATGatewayProfile"
        )(
            idle_timeout_in_minutes=20,
        )
        network_profile = self.models.ContainerServiceNetworkProfile(
            nat_gateway_profile=nat_gateway_profile
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), 20)
    def test_get_enable_pod_security_policy(self):
        """Verify get_enable_pod_security_policy: defaults to the raw False and
        falls back to the mc-level enable_pod_security_policy when attached."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"enable_pod_security_policy": False},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_pod_security_policy(), False)
        mc = self.models.ManagedCluster(
            location="test_location",
            enable_pod_security_policy=True,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_pod_security_policy(), True)
    def test_get_enable_managed_identity(self):
        """Verify that enabling pod identity without managed identity raises
        RequiredArgumentMissingError."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"enable_managed_identity": False, "enable_pod_identity": True},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        with self.assertRaises(RequiredArgumentMissingError):
            self.assertEqual(ctx_1.get_enable_managed_identity(), False)
    def test_get_enable_pod_identity(self):
        """Verify get_enable_pod_identity: defaults to the raw False; when the
        attached mc has pod identity enabled but managed identity is not
        enabled, RequiredArgumentMissingError is raised; with a kubenet network
        plugin it additionally requires enable_pod_identity_with_kubenet."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"enable_pod_identity": False},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_pod_identity(), False)
        pod_identity_profile = self.models.ManagedClusterPodIdentityProfile(
            enabled=True
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            pod_identity_profile=pod_identity_profile,
        )
        ctx_1.attach_mc(mc)
        # enabled on the mc but managed identity was not requested
        with self.assertRaises(RequiredArgumentMissingError):
            self.assertEqual(ctx_1.get_enable_pod_identity(), True)
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {
                "enable_managed_identity": True,
                "enable_pod_identity": True,
                "enable_pod_identity_with_kubenet": False,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile_2,
        )
        ctx_2.attach_mc(mc_2)
        # kubenet requires --enable-pod-identity-with-kubenet
        with self.assertRaises(RequiredArgumentMissingError):
            self.assertEqual(ctx_2.get_enable_pod_identity(), True)
    def test_get_enable_pod_identity_with_kubenet(self):
        """Verify get_enable_pod_identity_with_kubenet: defaults to the raw
        False, falls back to the attached mc's allow_network_plugin_kubenet,
        and raises RequiredArgumentMissingError when pod identity is requested
        on a kubenet cluster without the flag."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"enable_pod_identity_with_kubenet": False},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), False)
        pod_identity_profile = self.models.ManagedClusterPodIdentityProfile(
            enabled=True,
            allow_network_plugin_kubenet=True,
        )
        mc = self.models.ManagedCluster(
            location="test_location",
            pod_identity_profile=pod_identity_profile,
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), True)
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {
                "enable_managed_identity": True,
                "enable_pod_identity": True,
                "enable_pod_identity_with_kubenet": False,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile_2,
        )
        ctx_2.attach_mc(mc_2)
        with self.assertRaises(RequiredArgumentMissingError):
            self.assertEqual(
                ctx_2.get_enable_pod_identity_with_kubenet(), False
            )
    def test_get_addon_consts(self):
        """Verify get_addon_consts returns the full preview addon constant map,
        including the preview-only GITOPS addon and the monitoring
        AAD-MSI-auth key."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        addon_consts = ctx_1.get_addon_consts()
        ground_truth_addon_consts = {
            "ADDONS": ADDONS,
            "CONST_ACC_SGX_QUOTE_HELPER_ENABLED": CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
            "CONST_AZURE_POLICY_ADDON_NAME": CONST_AZURE_POLICY_ADDON_NAME,
            "CONST_CONFCOM_ADDON_NAME": CONST_CONFCOM_ADDON_NAME,
            "CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME": CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
            "CONST_INGRESS_APPGW_ADDON_NAME": CONST_INGRESS_APPGW_ADDON_NAME,
            "CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
            "CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
            "CONST_INGRESS_APPGW_SUBNET_CIDR": CONST_INGRESS_APPGW_SUBNET_CIDR,
            "CONST_INGRESS_APPGW_SUBNET_ID": CONST_INGRESS_APPGW_SUBNET_ID,
            "CONST_INGRESS_APPGW_WATCH_NAMESPACE": CONST_INGRESS_APPGW_WATCH_NAMESPACE,
            "CONST_KUBE_DASHBOARD_ADDON_NAME": CONST_KUBE_DASHBOARD_ADDON_NAME,
            "CONST_MONITORING_ADDON_NAME": CONST_MONITORING_ADDON_NAME,
            "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID": CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
            "CONST_OPEN_SERVICE_MESH_ADDON_NAME": CONST_OPEN_SERVICE_MESH_ADDON_NAME,
            "CONST_VIRTUAL_NODE_ADDON_NAME": CONST_VIRTUAL_NODE_ADDON_NAME,
            "CONST_VIRTUAL_NODE_SUBNET_NAME": CONST_VIRTUAL_NODE_SUBNET_NAME,
            "CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME": CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
            "CONST_SECRET_ROTATION_ENABLED": CONST_SECRET_ROTATION_ENABLED,
            "CONST_ROTATION_POLL_INTERVAL": CONST_ROTATION_POLL_INTERVAL,
            "CONST_GITOPS_ADDON_NAME": CONST_GITOPS_ADDON_NAME,
            "CONST_MONITORING_USING_AAD_MSI_AUTH": CONST_MONITORING_USING_AAD_MSI_AUTH,
        }
        self.assertEqual(addon_consts, ground_truth_addon_consts)
    def test_get_appgw_subnet_prefix(self):
        """Verify get_appgw_subnet_prefix: defaults to None and falls back to
        the ingress appgw addon's subnet CIDR config on the attached mc."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "appgw_subnet_prefix": None,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_appgw_subnet_prefix(), None)
        addon_profiles_1 = {
            CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix"
                },
            )
        }
        mc = self.models.ManagedCluster(
            location="test_location", addon_profiles=addon_profiles_1
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_appgw_subnet_prefix(), "test_appgw_subnet_prefix"
        )
    def test_get_enable_msi_auth_for_monitoring(self):
        """Verify get_enable_msi_auth_for_monitoring: defaults to the raw False
        and falls back to the monitoring addon's AAD-MSI-auth config on the
        attached mc."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "enable_msi_auth_for_monitoring": False,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), False)
        addon_profiles_1 = {
            CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={CONST_MONITORING_USING_AAD_MSI_AUTH: True},
            )
        }
        mc = self.models.ManagedCluster(
            location="test_location", addon_profiles=addon_profiles_1
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), True)
def test_get_no_wait(self):
ctx_1 = AKSPreviewContext(
self.cmd,
{
"no_wait": True,
"enable_msi_auth_for_monitoring": True,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
ctx_1.set_intermediate("monitoring", True, overwrite_exists=True)
self.assertEqual(ctx_1.get_no_wait(), False)
    def test_validate_gmsa_options(self):
        """Exercise the private __validate_gmsa_options helper: disabled with no
        extra options passes; enabled with yes=True passes; enabled without
        confirmation exits early; providing only one of dns_server /
        root_domain_name (or providing them while disabled) raises
        RequiredArgumentMissingError."""
        ctx = AKSPreviewContext(
            self.cmd,
            {},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        # (enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, yes)
        ctx._AKSPreviewContext__validate_gmsa_options(False, None, None, False)
        ctx._AKSPreviewContext__validate_gmsa_options(True, None, None, True)
        # user declines the confirmation prompt -> early exit
        with patch(
            "azext_aks_preview.decorator.prompt_y_n",
            return_value=False,
        ), self.assertRaises(DecoratorEarlyExitException):
            ctx._AKSPreviewContext__validate_gmsa_options(
                True, None, None, False
            )
        with self.assertRaises(RequiredArgumentMissingError):
            ctx._AKSPreviewContext__validate_gmsa_options(
                True, "test_gmsa_dns_server", None, False
            )
        with self.assertRaises(RequiredArgumentMissingError):
            ctx._AKSPreviewContext__validate_gmsa_options(
                False, None, "test_gmsa_root_domain_name", False
            )
    def test_get_enable_windows_gmsa(self):
        """Verify get_enable_windows_gmsa: defaults to the raw False and falls
        back to the attached mc's windows gMSA profile (with the confirmation
        prompt patched to accept)."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "enable_windows_gmsa": False,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_enable_windows_gmsa(), False)
        windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(enabled=True)
        windows_profile_1 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_admin_username",
            gmsa_profile=windows_gmsa_profile_1,
        )
        mc = self.models.ManagedCluster(
            location="test_location", windows_profile=windows_profile_1
        )
        ctx_1.attach_mc(mc)
        with patch(
            "azext_aks_preview.decorator.prompt_y_n",
            return_value=True,
        ):
            self.assertEqual(ctx_1.get_enable_windows_gmsa(), True)
    def test_get_gmsa_dns_server_and_root_domain_name(self):
        """Verify get_gmsa_dns_server_and_root_domain_name: defaults to
        (None, None), falls back to the attached mc's gMSA profile values, and
        raises CLIInternalError when the mc's profile is inconsistent with the
        raw parameters (dns_server set, root_domain_name missing)."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "enable_windows_gmsa": False,
                "gmsa_dns_server": None,
                "gmsa_root_domain_name": None,
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(
            ctx_1.get_gmsa_dns_server_and_root_domain_name(), (None, None)
        )
        windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_dns_server",
            root_domain_name="test_root_domain_name",
        )
        windows_profile_1 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_admin_username",
            gmsa_profile=windows_gmsa_profile_1,
        )
        mc = self.models.ManagedCluster(
            location="test_location", windows_profile=windows_profile_1
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_gmsa_dns_server_and_root_domain_name(),
            ("test_dns_server", "test_root_domain_name"),
        )
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {
                "enable_windows_gmsa": True,
                "gmsa_dns_server": "test_gmsa_dns_server",
                "gmsa_root_domain_name": "test_gmsa_root_domain_name",
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        # mc profile contradicts the raw parameters -> internal error
        windows_gmsa_profile_2 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_dns_server",
            root_domain_name=None,
        )
        windows_profile_2 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_admin_username",
            gmsa_profile=windows_gmsa_profile_2,
        )
        mc = self.models.ManagedCluster(
            location="test_location", windows_profile=windows_profile_2
        )
        ctx_2.attach_mc(mc)
        with self.assertRaises(CLIInternalError):
            ctx_2.get_gmsa_dns_server_and_root_domain_name()
def test_get_snapshot_id(self):
ctx_1 = AKSPreviewContext(
self.cmd,
{
"snapshot_id": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1.get_snapshot_id(), None)
creation_data = self.models.CreationData(
source_resource_id="test_source_resource_id"
)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
name="test_nodepool_name", creation_data=creation_data
)
mc = self.models.ManagedCluster(
location="test_location", agent_pool_profiles=[agent_pool_profile]
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_snapshot_id(), "test_source_resource_id")
    def test_get_snapshot(self):
        """Verify get_snapshot fetches the snapshot via _get_snapshot and that
        a second call returns the same object (cached, no re-fetch)."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {
                "snapshot_id": "test_source_resource_id",
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock()
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
            # second call served from the context's cache
            self.assertEqual(ctx_1.get_snapshot(), mock_snapshot)
    def test_get_kubernetes_version(self):
        """Verify get_kubernetes_version: returns the raw value, falls back to
        the attached mc, falls back to the snapshot's version when the raw
        value is empty, and prefers an explicit raw value over the snapshot."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"kubernetes_version": ""},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_kubernetes_version(), "")
        mc = self.models.ManagedCluster(
            location="test_location",
            kubernetes_version="test_mc_kubernetes_version",
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(
            ctx_1.get_kubernetes_version(), "test_mc_kubernetes_version"
        )
        # empty raw value -> taken from the snapshot
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"kubernetes_version": "", "snapshot_id": "test_snapshot_id"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(
                ctx_2.get_kubernetes_version(), "test_kubernetes_version"
            )
        # explicit raw value wins over the snapshot
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {
                "kubernetes_version": "custom_kubernetes_version",
                "snapshot_id": "test_snapshot_id",
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(kubernetes_version="test_kubernetes_version")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(
                ctx_3.get_kubernetes_version(), "custom_kubernetes_version"
            )
    def test_get_os_sku(self):
        """Verify get_os_sku: defaults to None, falls back to the attached mc's
        agent pool profile, falls back to the snapshot when unset, and prefers
        an explicit raw value over the snapshot."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"os_sku": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_os_sku(), None)
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name", os_sku="test_mc_os_sku"
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_os_sku(), "test_mc_os_sku")
        # unset raw value -> taken from the snapshot
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"os_sku": None, "snapshot_id": "test_snapshot_id"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(os_sku="test_os_sku")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(ctx_2.get_os_sku(), "test_os_sku")
        # explicit raw value wins over the snapshot
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {
                "os_sku": "custom_os_sku",
                "snapshot_id": "test_snapshot_id",
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(os_sku="test_os_sku")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(ctx_3.get_os_sku(), "custom_os_sku")
    def test_get_node_vm_size(self):
        """Verify get_node_vm_size: defaults to "Standard_DS2_v2", falls back
        to the attached mc's agent pool vm_size, falls back to the snapshot
        when unset, and prefers an explicit raw value over the snapshot."""
        ctx_1 = AKSPreviewContext(
            self.cmd,
            {"node_vm_size": None},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        self.assertEqual(ctx_1.get_node_vm_size(), "Standard_DS2_v2")
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name="test_nodepool_name", vm_size="Standard_ABCD_v2"
        )
        mc = self.models.ManagedCluster(
            location="test_location", agent_pool_profiles=[agent_pool_profile]
        )
        ctx_1.attach_mc(mc)
        self.assertEqual(ctx_1.get_node_vm_size(), "Standard_ABCD_v2")
        # unset raw value -> taken from the snapshot
        ctx_2 = AKSPreviewContext(
            self.cmd,
            {"node_vm_size": None, "snapshot_id": "test_snapshot_id"},
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(vm_size="test_vm_size")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(ctx_2.get_node_vm_size(), "test_vm_size")
        # explicit raw value wins over the snapshot
        ctx_3 = AKSPreviewContext(
            self.cmd,
            {
                "node_vm_size": "custom_node_vm_size",
                "snapshot_id": "test_snapshot_id",
            },
            self.models,
            decorator_mode=DecoratorMode.CREATE,
        )
        mock_snapshot = Mock(vm_size="test_vm_size")
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            self.assertEqual(ctx_3.get_node_vm_size(), "custom_node_vm_size")
def test_test_get_outbound_type(self):
ctx_1 = AKSPreviewContext(
self.cmd,
{
"outbound_type": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
self.assertEqual(ctx_1._get_outbound_type(read_only=True), None)
self.assertEqual(ctx_1.get_outbound_type(), "loadBalancer")
network_profile_1 = self.models.ContainerServiceNetworkProfile(
outbound_type="test_outbound_type"
)
mc = self.models.ManagedCluster(
location="test_location", network_profile=network_profile_1
)
ctx_1.attach_mc(mc)
self.assertEqual(ctx_1.get_outbound_type(), "test_outbound_type")
ctx_2 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
"load_balancer_sku": "basic",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(InvalidArgumentValueError):
ctx_2.get_outbound_type()
ctx_3 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
"load_balancer_sku": "basic",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(InvalidArgumentValueError):
ctx_3.get_outbound_type()
ctx_4 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
"vnet_subnet_id": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(RequiredArgumentMissingError):
ctx_4.get_outbound_type()
ctx_5 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
"vnet_subnet_id": None,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(RequiredArgumentMissingError):
ctx_5.get_outbound_type()
ctx_6 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
"vnet_subnet_id": "test_vnet_subnet_id",
"load_balancer_managed_outbound_ip_count": 10,
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_6.get_outbound_type()
ctx_7 = AKSPreviewContext(
self.cmd,
{
"outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
"vnet_subnet_id": "test_vnet_subnet_id",
},
self.models,
decorator_mode=DecoratorMode.CREATE,
)
load_balancer_profile = self.models.lb_models.get(
"ManagedClusterLoadBalancerProfile"
)(
outbound_ip_prefixes=self.models.lb_models.get(
"ManagedClusterLoadBalancerProfileOutboundIPPrefixes"
)(
public_ip_prefixes=[
self.models.lb_models.get("ResourceReference")(
id="test_public_ip_prefix"
)
]
)
)
with self.assertRaises(MutuallyExclusiveArgumentError):
ctx_7.get_outbound_type(
load_balancer_profile=load_balancer_profile,
)
class AKSPreviewCreateDecoratorTestCase(unittest.TestCase):
    def setUp(self):
        """Register preview resource types and build the mock CLI, command,
        models and client shared by every test in this case."""
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
        self.client = MockClient()
    def test_set_up_agent_pool_profiles(self):
        """Verify set_up_agent_pool_profiles: rejects a non-mc argument with
        CLIInternalError; builds a default System-mode VMSS pool from minimal
        parameters; and, with full parameters plus a snapshot, fills kubelet /
        linux OS config from files, records the snapshot as creation_data, and
        truncates the nodepool name ("test_np_name1234" -> "test_np_name")."""
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "nodepool_name": "nodepool1",
                "nodepool_tags": None,
                "nodepool_labels": None,
                "node_count": 3,
                "node_vm_size": "Standard_DS2_v2",
                "os_sku": None,
                "vnet_subnet_id": None,
                "pod_subnet_id": None,
                "ppg": None,
                "zones": None,
                "enable_node_public_ip": False,
                "enable_fips_image": False,
                "node_public_ip_prefix_id": None,
                "enable_encryption_at_host": False,
                "enable_ultra_ssd": False,
                "max_pods": 0,
                "node_osdisk_size": 0,
                "node_osdisk_type": None,
                "enable_cluster_autoscaler": False,
                "min_count": None,
                "max_count": None,
                "workload_runtime": None,
                "gpu_instance_profile": None,
                "kubelet_config": None,
                "snapshot_id": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # passing anything other than an mc object is an internal error
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_agent_pool_profiles(None)
        dec_mc_1 = dec_1.set_up_agent_pool_profiles(mc_1)
        agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            tags=None,
            node_labels=None,
            count=3,
            vm_size="Standard_DS2_v2",
            os_type="Linux",
            os_sku=None,
            vnet_subnet_id=None,
            pod_subnet_id=None,
            proximity_placement_group_id=None,
            availability_zones=None,
            enable_node_public_ip=False,
            enable_fips=False,
            node_public_ip_prefix_id=None,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            max_pods=None,
            type="VirtualMachineScaleSets",
            mode="System",
            os_disk_size_gb=None,
            os_disk_type=None,
            enable_auto_scaling=False,
            min_count=None,
            max_count=None,
            workload_runtime=None,
            gpu_instance_profile=None,
            kubelet_config=None,
            creation_data=None,
        )
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        ground_truth_mc_1.agent_pool_profiles = [agent_pool_profile_1]
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "nodepool_name": "test_np_name1234",
                "nodepool_tags": {"k1": "v1"},
                "nodepool_labels": {"k1": "v1", "k2": "v2"},
                "node_count": 10,
                "node_vm_size": "Standard_DSx_vy",
                "os_sku": None,
                "vnet_subnet_id": "test_vnet_subnet_id",
                "pod_subnet_id": "test_pod_subnet_id",
                "ppg": "test_ppg_id",
                "zones": ["tz1", "tz2"],
                "enable_node_public_ip": True,
                "enable_fips_image": True,
                "node_public_ip_prefix_id": "test_node_public_ip_prefix_id",
                "enable_encryption_at_host": True,
                "enable_ultra_ssd": True,
                "max_pods": 50,
                "node_osdisk_size": 100,
                "node_osdisk_type": "test_os_disk_type",
                "enable_cluster_autoscaler": True,
                "min_count": 5,
                "max_count": 20,
                "workload_runtime": "test_workload_runtime",
                "gpu_instance_profile": "test_gpu_instance_profile",
                "kubelet_config": _get_test_data_file("kubeletconfig.json"),
                "linux_os_config": _get_test_data_file("linuxosconfig.json"),
                "snapshot_id": "test_snapshot_id",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        # os_sku is unset in the raw parameters, so it comes from the snapshot
        mock_snapshot = Mock(
            kubernetes_version="",
            os_sku="snapshot_os_sku",
            vm_size="snapshot_vm_size",
        )
        with patch(
            "azext_aks_preview.decorator._get_snapshot",
            return_value=mock_snapshot,
        ):
            dec_mc_2 = dec_2.set_up_agent_pool_profiles(mc_2)
        agent_pool_profile_2 = self.models.ManagedClusterAgentPoolProfile(
            name="test_np_name",
            tags={"k1": "v1"},
            node_labels={"k1": "v1", "k2": "v2"},
            count=10,
            vm_size="Standard_DSx_vy",
            os_type="Linux",
            os_sku="snapshot_os_sku",
            vnet_subnet_id="test_vnet_subnet_id",
            pod_subnet_id="test_pod_subnet_id",
            proximity_placement_group_id="test_ppg_id",
            availability_zones=["tz1", "tz2"],
            enable_node_public_ip=True,
            enable_fips=True,
            node_public_ip_prefix_id="test_node_public_ip_prefix_id",
            enable_encryption_at_host=True,
            enable_ultra_ssd=True,
            max_pods=50,
            type="VirtualMachineScaleSets",
            mode="System",
            os_disk_size_gb=100,
            os_disk_type="test_os_disk_type",
            enable_auto_scaling=True,
            min_count=5,
            max_count=20,
            workload_runtime="test_workload_runtime",
            gpu_instance_profile="test_gpu_instance_profile",
            kubelet_config={
                "cpuManagerPolicy": "static",
                "cpuCfsQuota": True,
                "cpuCfsQuotaPeriod": "200ms",
                "imageGcHighThreshold": 90,
                "imageGcLowThreshold": 70,
                "topologyManagerPolicy": "best-effort",
                "allowedUnsafeSysctls": ["kernel.msg*", "net.*"],
                "failSwapOn": False,
                "containerLogMaxFiles": 10,
                "podMaxPids": 120,
                "containerLogMaxSizeMB": 20,
            },
            linux_os_config={
                "transparentHugePageEnabled": "madvise",
                "transparentHugePageDefrag": "defer+madvise",
                "swapFileSizeMB": 1500,
                "sysctls": {
                    "netCoreSomaxconn": 163849,
                    "netIpv4TcpTwReuse": True,
                    "netIpv4IpLocalPortRange": "32000 60000",
                },
            },
            creation_data=self.models.CreationData(
                source_resource_id="test_snapshot_id"
            ),
        )
        ground_truth_mc_2 = self.models.ManagedCluster(location="test_location")
        ground_truth_mc_2.agent_pool_profiles = [agent_pool_profile_2]
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_http_proxy_config(self):
        """Verify set_up_http_proxy_config: rejects a non-mc argument with
        CLIInternalError, leaves the mc untouched when no config is given, and
        loads the proxy settings from a JSON file when a path is provided."""
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "http_proxy_config": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_http_proxy_config(None)
        dec_mc_1 = dec_1.set_up_http_proxy_config(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {"http_proxy_config": _get_test_data_file("httpproxyconfig.json")},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_http_proxy_config(mc_2)
        # values below mirror the content of httpproxyconfig.json
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            http_proxy_config={
                "httpProxy": "http://myproxy.server.com:8080/",
                "httpsProxy": "https://myproxy.server.com:8080/",
                "noProxy": ["localhost", "127.0.0.1"],
            },
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_node_resource_group(self):
        """Verify set_up_node_resource_group: rejects a non-mc argument with
        CLIInternalError, leaves the mc untouched when the parameter is None,
        and sets node_resource_group on the mc when provided."""
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "node_resource_group": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_node_resource_group(None)
        dec_mc_1 = dec_1.set_up_node_resource_group(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {"node_resource_group": "test_node_resource_group"},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_node_resource_group(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            node_resource_group="test_node_resource_group",
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_network_profile(self):
        """Verify set_up_network_profile: rejects a non-mc argument with
        CLIInternalError; with all-None parameters builds the default kubenet
        profile (default CIDRs, standard LB, loadBalancer outbound); with NAT
        gateway parameters attaches a ManagedClusterNATGatewayProfile."""
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": None,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": None,
                "pod_cidr": None,
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": None,
                "nat_gateway_idle_timeout": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_network_profile(None)
        dec_mc_1 = dec_1.set_up_network_profile(mc_1)
        # defaults applied when nothing is specified
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",
            pod_cidr="10.244.0.0/16",
            service_cidr="10.0.0.0/16",
            dns_service_ip="10.0.0.10",
            docker_bridge_cidr="172.17.0.1/16",
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
        )
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_1
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "load_balancer_sku": None,
                "load_balancer_managed_outbound_ip_count": None,
                "load_balancer_outbound_ips": None,
                "load_balancer_outbound_ip_prefixes": None,
                "load_balancer_outbound_ports": None,
                "load_balancer_idle_timeout": None,
                "outbound_type": None,
                "network_plugin": "kubenet",
                "pod_cidr": "10.246.0.0/16",
                "service_cidr": None,
                "dns_service_ip": None,
                "docker_bridge_cidr": None,
                "network_policy": None,
                "nat_gateway_managed_outbound_ip_count": 10,
                "nat_gateway_idle_timeout": 20,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_network_profile(mc_2)
        # explicit NAT gateway parameters are carried into the profile
        nat_gateway_profile_2 = self.models.nat_gateway_models.get(
            "ManagedClusterNATGatewayProfile"
        )(
            managed_outbound_ip_profile=self.models.nat_gateway_models.get(
                "ManagedClusterManagedOutboundIPProfile"
            )(count=10),
            idle_timeout_in_minutes=20,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet",
            pod_cidr="10.246.0.0/16",
            service_cidr=None,
            dns_service_ip=None,
            docker_bridge_cidr=None,
            load_balancer_sku="standard",
            outbound_type="loadBalancer",
            nat_gateway_profile=nat_gateway_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_pod_security_policy(self):
        """set_up_pod_security_policy must reject a None mc and copy the raw
        enable_pod_security_policy flag onto the ManagedCluster."""
        # Case 1: flag disabled -> mc carries enable_pod_security_policy=False.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_pod_security_policy": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # Passing None instead of a ManagedCluster is an internal error.
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_pod_security_policy(None)
        dec_mc_1 = dec_1.set_up_pod_security_policy(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", enable_pod_security_policy=False
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        # Case 2: flag enabled.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {"enable_pod_security_policy": True},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_pod_security_policy(mc_2)
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            enable_pod_security_policy=True,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_set_up_pod_identity_profile(self):
        """set_up_pod_identity_profile should leave the mc untouched when pod
        identity is disabled, and populate pod_identity_profile (including the
        kubenet opt-in flag) when enabled."""
        # Case 1: disabled -> no pod_identity_profile is attached.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_pod_identity": False,
                "enable_pod_identity_with_kubenet": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # Passing None instead of a ManagedCluster is an internal error.
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_pod_identity_profile(None)
        dec_mc_1 = dec_1.set_up_pod_identity_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        # Case 2: enabled on a kubenet cluster -> profile enabled with
        # allow_network_plugin_kubenet=True.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_managed_identity": True,
                "enable_pod_identity": True,
                "enable_pod_identity_with_kubenet": True,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        mc_2 = self.models.ManagedCluster(
            location="test_location", network_profile=network_profile_2
        )
        dec_mc_2 = dec_2.set_up_pod_identity_profile(mc_2)
        network_profile_2 = self.models.ContainerServiceNetworkProfile(
            network_plugin="kubenet"
        )
        pod_identity_profile_2 = self.models.ManagedClusterPodIdentityProfile(
            enabled=True,
            allow_network_plugin_kubenet=True,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location",
            network_profile=network_profile_2,
            pod_identity_profile=pod_identity_profile_2,
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_build_monitoring_addon_profile(self):
        """build_monitoring_addon_profile should enable the addon, store the
        slash-prefixed workspace resource id and the MSI-auth flag in its
        config, and set the 'monitoring' intermediate to True. The external
        ensure_container_insights_for_monitoring call is patched out."""
        # Case 1: MSI auth for monitoring disabled.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "location": "test_location",
                "enable_addons": "monitoring",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_1.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            self.assertEqual(dec_1.context.get_intermediate("monitoring"), None)
            monitoring_addon_profile = dec_1.build_monitoring_addon_profile()
            ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: False,
                },
            )
            self.assertEqual(
                monitoring_addon_profile, ground_truth_monitoring_addon_profile
            )
            self.assertEqual(dec_1.context.get_intermediate("monitoring"), True)

        # Case 2: MSI auth for monitoring enabled.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "location": "test_location",
                "enable_addons": "monitoring",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": True,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_2.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            self.assertEqual(dec_2.context.get_intermediate("monitoring"), None)
            monitoring_addon_profile = dec_2.build_monitoring_addon_profile()
            ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                },
            )
            self.assertEqual(
                monitoring_addon_profile, ground_truth_monitoring_addon_profile
            )
            self.assertEqual(dec_2.context.get_intermediate("monitoring"), True)
    def test_build_ingress_appgw_addon_profile(self):
        """build_ingress_appgw_addon_profile should enable the addon, copy any
        provided appgw_* parameters into its config (appgw_subnet_cidr wins
        over the legacy appgw_subnet_prefix when both are set), and flip the
        'ingress_appgw_addon_enabled' intermediate to True."""
        # Case 1: no appgw parameters -> enabled with an empty config.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_1.build_ingress_appgw_addon_profile()
        ground_truth_ingress_appgw_addon_profile = (
            self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={},
            )
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )

        # Case 2: legacy appgw_subnet_prefix populates the subnet CIDR config key.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_2.build_ingress_appgw_addon_profile()
        ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
            enabled=True,
            config={
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix",
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
            },
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )

        # Case 3: appgw_subnet_cidr takes precedence over appgw_subnet_prefix.
        dec_3 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_subnet_cidr": "test_appgw_subnet_cidr",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        self.assertEqual(
            dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )
        ingress_appgw_addon_profile = dec_3.build_ingress_appgw_addon_profile()
        ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile(
            enabled=True,
            config={
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr",
                CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
            },
        )
        self.assertEqual(
            ingress_appgw_addon_profile,
            ground_truth_ingress_appgw_addon_profile,
        )
        self.assertEqual(
            dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
    def test_build_gitops_addon_profile(self):
        """The gitops addon profile is simply enabled, with no config keys."""
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {},
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        gitops_addon_profile = dec_1.build_gitops_addon_profile()
        ground_truth_gitops_addon_profile = (
            self.models.ManagedClusterAddonProfile(
                enabled=True,
            )
        )
        self.assertEqual(
            gitops_addon_profile, ground_truth_gitops_addon_profile
        )
    def test_set_up_addon_profiles(self):
        """set_up_addon_profiles should produce an empty addon_profiles dict
        when no addons are requested, and build one profile per entry of the
        comma-separated enable_addons list, setting the matching intermediates
        ('monitoring', 'ingress_appgw_addon_enabled') as it goes."""
        # Case 1: no addons requested -> empty dict, no intermediates set.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "enable_addons": None,
                "workspace_resource_id": None,
                "aci_subnet_name": None,
                "appgw_name": None,
                "appgw_subnet_cidr": None,
                "appgw_id": None,
                "appgw_subnet_id": None,
                "appgw_watch_namespace": None,
                "enable_sgxquotehelper": False,
                "enable_secret_rotation": False,
                "rotation_poll_interval": None,
                "appgw_subnet_prefix": None,
                "enable_msi_auth_for_monitoring": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # Passing None instead of a ManagedCluster is an internal error.
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_addon_profiles(None)
        dec_mc_1 = dec_1.set_up_addon_profiles(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location", addon_profiles={}
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)
        self.assertEqual(dec_1.context.get_intermediate("monitoring"), None)
        self.assertEqual(
            dec_1.context.get_intermediate("enable_virtual_node"), None
        )
        self.assertEqual(
            dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None
        )

        # Case 2: monitoring + ingress-appgw + gitops requested together.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "name": "test_name",
                "resource_group_name": "test_rg_name",
                "location": "test_location",
                "vnet_subnet_id": "test_vnet_subnet_id",
                "enable_addons": "monitoring,ingress-appgw,gitops",
                "workspace_resource_id": "test_workspace_resource_id",
                "enable_msi_auth_for_monitoring": True,
                "appgw_name": "test_appgw_name",
                "appgw_subnet_prefix": "test_appgw_subnet_prefix",
                "appgw_id": "test_appgw_id",
                "appgw_subnet_id": "test_appgw_subnet_id",
                "appgw_watch_namespace": "test_appgw_watch_namespace",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_2.context.set_intermediate(
            "subscription_id", "test_subscription_id"
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        with patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            return_value=None,
        ):
            dec_mc_2 = dec_2.set_up_addon_profiles(mc_2)
        addon_profiles_2 = {
            CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id",
                    CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                },
            ),
            CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
                config={
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name",
                    CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id",
                    CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id",
                    CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix",
                    CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace",
                },
            ),
            CONST_GITOPS_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                enabled=True,
            ),
        }
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", addon_profiles=addon_profiles_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
        self.assertEqual(dec_2.context.get_intermediate("monitoring"), True)
        self.assertEqual(
            dec_2.context.get_intermediate("enable_virtual_node"), None
        )
        self.assertEqual(
            dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True
        )
    def test_set_up_windows_profile(self):
        """set_up_windows_profile should skip profile creation when no Windows
        admin credentials are given, and otherwise build a windows_profile
        carrying AHUB licensing and the gMSA sub-profile."""
        # Case 1: no Windows parameters -> no windows_profile on the mc.
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": None,
                "windows_admin_password": None,
                "enable_ahub": False,
                "enable_windows_gmsa": False,
                "gmsa_dns_server": None,
                "gmsa_root_domain_name": None,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_1 = self.models.ManagedCluster(location="test_location")
        # Passing None instead of a ManagedCluster is an internal error.
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_windows_profile(None)
        dec_mc_1 = dec_1.set_up_windows_profile(mc_1)
        ground_truth_mc_1 = self.models.ManagedCluster(location="test_location")
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        # Case 2: full Windows setup with AHUB licensing and gMSA enabled.
        dec_2 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "windows_admin_username": "test_win_admin_name",
                "windows_admin_password": "test_win_admin_password",
                "enable_ahub": True,
                "enable_windows_gmsa": True,
                "gmsa_dns_server": "test_gmsa_dns_server",
                "gmsa_root_domain_name": "test_gmsa_root_domain_name",
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        mc_2 = self.models.ManagedCluster(location="test_location")
        dec_mc_2 = dec_2.set_up_windows_profile(mc_2)
        windows_gmsa_profile_2 = self.models.WindowsGmsaProfile(
            enabled=True,
            dns_server="test_gmsa_dns_server",
            root_domain_name="test_gmsa_root_domain_name",
        )
        windows_profile_2 = self.models.ManagedClusterWindowsProfile(
            admin_username="test_win_admin_name",
            admin_password="test_win_admin_password",
            license_type="Windows_Server",
            gmsa_profile=windows_gmsa_profile_2,
        )
        ground_truth_mc_2 = self.models.ManagedCluster(
            location="test_location", windows_profile=windows_profile_2
        )
        self.assertEqual(dec_mc_2, ground_truth_mc_2)
    def test_construct_preview_mc_profile(self):
        """End-to-end check of construct_preview_mc_profile: drive it with the
        real aks_create CLI defaults (harvested via inspect) plus a generated
        SSH key, and compare the resulting ManagedCluster to a hand-built
        ground truth."""
        import inspect
        import paramiko

        from azext_aks_preview.custom import aks_create

        # Split aks_create's signature into positional params and defaults so
        # the raw parameter dict mirrors a real CLI invocation.
        optional_params = {}
        positional_params = []
        for _, v in inspect.signature(aks_create).parameters.items():
            if v.default != v.empty:
                optional_params[v.name] = v.default
            else:
                positional_params.append(v.name)
        ground_truth_positional_params = [
            "cmd",
            "client",
            "resource_group_name",
            "name",
            "ssh_key_value",
        ]
        self.assertEqual(positional_params, ground_truth_positional_params)

        # A real RSA key: ssh_key_value is validated during construction.
        key = paramiko.RSAKey.generate(2048)
        public_key = "{} {}".format(key.get_name(), key.get_base64())
        raw_param_dict = {
            "resource_group_name": "test_rg_name",
            "name": "test_name",
            "ssh_key_value": public_key,
        }
        raw_param_dict.update(optional_params)
        from azure.cli.command_modules.acs.decorator import AKSParamDict
        raw_param_dict = AKSParamDict(raw_param_dict)
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd, self.client, raw_param_dict, CUSTOM_MGMT_AKS_PREVIEW
        )
        mock_profile = Mock(
            get_subscription_id=Mock(return_value="1234-5678-9012")
        )
        # Patch out the ARM lookups (resource group location, login profile).
        with patch(
            "azure.cli.command_modules.acs.decorator.get_rg_location",
            return_value="test_location",
        ), patch(
            "azure.cli.command_modules.acs.decorator.Profile",
            return_value=mock_profile,
        ):
            dec_mc_1 = dec_1.construct_preview_mc_profile()

        agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile(
            name="nodepool1",
            count=3,
            vm_size="Standard_DS2_v2",
            os_type="Linux",
            enable_node_public_ip=False,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            type="VirtualMachineScaleSets",
            mode="System",
            enable_auto_scaling=False,
            enable_fips=False,
        )
        ssh_config_1 = self.models.ContainerServiceSshConfiguration(
            public_keys=[
                self.models.ContainerServiceSshPublicKey(key_data=public_key)
            ]
        )
        linux_profile_1 = self.models.ContainerServiceLinuxProfile(
            admin_username="azureuser", ssh=ssh_config_1
        )
        network_profile_1 = self.models.ContainerServiceNetworkProfile(
            load_balancer_sku="standard",
        )
        identity_1 = self.models.ManagedClusterIdentity(type="SystemAssigned")
        ground_truth_mc_1 = self.models.ManagedCluster(
            location="test_location",
            dns_prefix="testname-testrgname-1234-5",
            kubernetes_version="",
            addon_profiles={},
            enable_rbac=True,
            agent_pool_profiles=[agent_pool_profile_1],
            linux_profile=linux_profile_1,
            network_profile=network_profile_1,
            identity=identity_1,
            disable_local_accounts=False,
            enable_pod_security_policy=False,
        )
        self.assertEqual(dec_mc_1, ground_truth_mc_1)

        raw_param_dict.print_usage_statistics()
    def test_create_mc(self):
        """When MSI-auth monitoring is on, create_mc must call
        ensure_container_insights_for_monitoring with the DCRA arguments; an
        AAD 'not found in tenant' CloudError is ultimately re-raised
        (time.sleep is patched, presumably to skip retry back-off — confirm
        against the decorator implementation)."""
        mc_1 = self.models.ManagedCluster(
            location="test_location",
            addon_profiles={
                CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile(
                    enabled=True,
                    config={
                        CONST_MONITORING_USING_AAD_MSI_AUTH: True,
                    },
                )
            },
        )
        dec_1 = AKSPreviewCreateDecorator(
            self.cmd,
            self.client,
            {
                "resource_group_name": "test_rg_name",
                "name": "test_name",
                "enable_managed_identity": True,
                "no_wait": False,
            },
            CUSTOM_MGMT_AKS_PREVIEW,
        )
        dec_1.context.attach_mc(mc_1)
        dec_1.context.set_intermediate(
            "monitoring", True, overwrite_exists=True
        )
        dec_1.context.set_intermediate(
            "subscription_id", "test_subscription_id", overwrite_exists=True
        )

        # Simulated ARM failure surfaced by the monitoring bootstrap call.
        resp = requests.Response()
        resp.status_code = 500
        err = CloudError(resp)
        err.message = "not found in Active Directory tenant"

        with self.assertRaises(CloudError), patch("time.sleep"), patch(
            "azure.cli.command_modules.acs.decorator.AKSCreateDecorator.create_mc"
        ), patch(
            "azext_aks_preview.decorator.ensure_container_insights_for_monitoring",
            side_effect=err,
        ) as ensure_monitoring:
            dec_1.create_mc(mc_1)
        ensure_monitoring.assert_called_with(
            self.cmd,
            mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME],
            "test_subscription_id",
            "test_rg_name",
            "test_name",
            "test_location",
            remove_monitoring=False,
            aad_route=True,
            create_dcr=False,
            create_dcra=True,
        )
class AKSPreviewUpdateDecoratorTestCase(unittest.TestCase):
    """Shared fixtures for AKSPreviewUpdateDecorator tests."""

    def setUp(self):
        # Register the preview resource type so the CUSTOM_MGMT_AKS_PREVIEW
        # model set resolves before any models are instantiated.
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW)
        self.client = MockClient()
| true | true |
1c330b1729e9ea2a5375109526f749c310ba72a2 | 9,272 | py | Python | nets.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 9 | 2020-03-04T13:24:25.000Z | 2022-03-15T09:52:37.000Z | nets.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 2 | 2019-12-30T07:28:33.000Z | 2020-10-13T11:38:34.000Z | nets.py | asappinc/emergent_comms_negotiation | 19ad405dcb83a3a521b6e1752cec075b69aa164b | [
"MIT"
] | 6 | 2018-03-15T18:08:45.000Z | 2019-07-15T06:49:16.000Z | import torch
from torch import nn, autograd
from torch.autograd import Variable
import torch.nn.functional as F
class NumberSequenceEncoder(nn.Module):
    """Encode a batch of integer sequences into fixed-size vectors.

    Each symbol is embedded, the sequence is consumed one step at a time by a
    single LSTM cell, and the final hidden state is returned as the encoding.

    eg for values 0,1,2,3,4,5, num_values will be: 6
    for 0,1,..,9 num_values will be: 10
    """
    def __init__(self, num_values, embedding_size=100):
        super().__init__()
        self.embedding_size = embedding_size
        self.num_values = num_values
        self.embedding = nn.Embedding(num_values, embedding_size)
        self.lstm = nn.LSTMCell(
            input_size=embedding_size,
            hidden_size=embedding_size)
        self.zero_state = None

    def forward(self, x):
        # x: (batch, seq_len) integer ids -> (batch, embedding_size)
        batch_size = x.size()[0]
        seq_len = x.size()[1]
        # Time-major layout so each LSTM step sees one (batch, emb) slice.
        embedded = self.embedding(x.transpose(0, 1))
        tensor_mod = torch.cuda if embedded.is_cuda else torch
        hidden = Variable(tensor_mod.FloatTensor(batch_size, self.embedding_size).fill_(0))
        cell = Variable(tensor_mod.FloatTensor(batch_size, self.embedding_size).fill_(0))
        for step in range(seq_len):
            hidden, cell = self.lstm(embedded[step], (hidden, cell))
        return hidden
class CombinedNet(nn.Module):
    """Fuse `num_sources` concatenated embeddings into one hidden vector."""
    def __init__(self, num_sources=3, embedding_size=100):
        super().__init__()
        self.embedding_size = embedding_size
        self.h1 = nn.Linear(embedding_size * num_sources, embedding_size)

    def forward(self, x):
        # One affine layer followed by a ReLU non-linearity.
        return F.relu(self.h1(x))
class TermPolicy(nn.Module):
    """Binary policy head deciding whether the agent terminates negotiation.

    forward() returns, for a batch of thought vectors:
      term_probs : P(terminate) per row (eps added for log stability)
      log_g      : log-likelihood of the sampled action (None when testing)
      action     : byte tensor of chosen actions (sampled; greedy when testing)
      entropy    : sum over the batch of -p*log(p) (regularisation term)
      matches_greedy_count : how many chosen actions equal the greedy choice
    """
    def __init__(self, embedding_size=100):
        super().__init__()
        self.h1 = nn.Linear(embedding_size, 1)

    def forward(self, thoughtvector, testing, eps=1e-8):
        logits = self.h1(thoughtvector)
        # torch.sigmoid replaces the deprecated F.sigmoid alias (same values).
        term_probs = torch.sigmoid(logits)

        # Greedy decision: terminate whenever P(terminate) >= 0.5.
        res_greedy = (term_probs.data >= 0.5).view(-1, 1).float()

        log_g = None
        if not testing:
            a = torch.bernoulli(term_probs)
            # g = p if a == 1 else (1 - p): likelihood of the drawn action.
            g = a.detach() * term_probs + (1 - a.detach()) * (1 - term_probs)
            log_g = g.log()
            a = a.data
        else:
            a = res_greedy
        matches_greedy = res_greedy == a
        matches_greedy_count = matches_greedy.int().sum()

        term_probs = term_probs + eps
        entropy = - (term_probs * term_probs.log()).sum(1).sum()
        return term_probs, log_g, a.byte(), entropy, matches_greedy_count
class UtterancePolicy(nn.Module):
    """Autoregressive policy head emitting an utterance of `max_len` tokens.

    Tokens are generated one at a time by an LSTM cell whose hidden state is
    seeded with the agent's thought vector; each step samples from (or, when
    testing, takes the argmax of) a softmax over the token vocabulary.
    """
    def __init__(self, embedding_size=100, num_tokens=10, max_len=6):
        super().__init__()
        self.embedding_size = embedding_size
        self.num_tokens = num_tokens
        self.max_len = max_len
        self.embedding = nn.Embedding(num_tokens, embedding_size)
        self.lstm = nn.LSTMCell(
            input_size=embedding_size,
            hidden_size=embedding_size
        )
        self.h1 = nn.Linear(embedding_size, num_tokens)

    def forward(self, h_t, testing, eps=1e-8):
        """Generate utterances for a batch of thought vectors h_t.

        Returns (log-prob nodes of sampled tokens, utterance LongTensor of
        shape (batch, max_len), summed entropy, count of samples matching the
        argmax, number of stochastic draws made).
        """
        batch_size = h_t.size()[0]
        type_constr = torch.cuda if h_t.is_cuda else torch
        # Hidden state seeded with the thought vector; cell state zeroed.
        h = h_t
        c = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))
        # Token 0 acts as the start-of-sequence symbol.
        last_token = type_constr.LongTensor(batch_size).fill_(0)
        utterance_nodes = []
        utterance = type_constr.LongTensor(batch_size, self.max_len).fill_(0)
        entropy = 0
        matches_argmax_count = 0
        stochastic_draws_count = 0
        for i in range(self.max_len):
            embedded = self.embedding(Variable(last_token))
            h, c = self.lstm(embedded, (h, c))
            logits = self.h1(h)
            # Explicit dim avoids the deprecated implicit-dim softmax.
            probs = F.softmax(logits, dim=1)

            _, res_greedy = probs.data.max(1)
            res_greedy = res_greedy.view(-1, 1).long()

            log_g = None
            if not testing:
                # num_samples=1 is required by current torch.multinomial
                # (it was the implicit default in older releases).
                a = torch.multinomial(probs, 1)
                g = torch.gather(probs, 1, Variable(a.data))
                log_g = g.log()
                a = a.data
            else:
                a = res_greedy

            matches_argmax = res_greedy == a
            matches_argmax_count += matches_argmax.int().sum()
            stochastic_draws_count += batch_size

            if log_g is not None:
                utterance_nodes.append(log_g)
            last_token = a.view(batch_size)
            utterance[:, i] = last_token

            probs = probs + eps
            entropy -= (probs * probs.log()).sum(1).sum()
        return utterance_nodes, utterance, entropy, matches_argmax_count, stochastic_draws_count
class ProposalPolicy(nn.Module):
    """Policy head proposing a count in [0, num_counts) for each item type.

    One independent linear+softmax head per item.
    """
    def __init__(self, embedding_size=100, num_counts=6, num_items=3):
        super().__init__()
        self.num_counts = num_counts
        self.num_items = num_items
        self.embedding_size = embedding_size
        self.fcs = []
        for i in range(num_items):
            fc = nn.Linear(embedding_size, num_counts)
            self.fcs.append(fc)
            # setattr registers each head on the Module so its parameters
            # show up in parameters()/state_dict (a plain list would not).
            self.__setattr__('h1_%s' % i, fc)

    def forward(self, x, testing, eps=1e-8):
        """Sample (or greedily pick, when testing) one proposal per batch row.

        Returns (log-prob nodes, proposal LongTensor of shape
        (batch, num_items), summed entropy, argmax-match count, draw count).
        """
        batch_size = x.size()[0]
        nodes = []
        entropy = 0
        type_constr = torch.cuda if x.is_cuda else torch
        matches_argmax_count = 0
        stochastic_draws = 0
        proposal = type_constr.LongTensor(batch_size, self.num_items).fill_(0)
        for i in range(self.num_items):
            logits = self.fcs[i](x)
            # Explicit dim avoids the deprecated implicit-dim softmax.
            probs = F.softmax(logits, dim=1)
            _, res_greedy = probs.data.max(1)
            res_greedy = res_greedy.view(-1, 1).long()

            log_g = None
            if not testing:
                # num_samples=1 is required by current torch.multinomial.
                a = torch.multinomial(probs, 1)
                g = torch.gather(probs, 1, Variable(a.data))
                log_g = g.log()
                a = a.data
            else:
                a = res_greedy

            matches_argmax = res_greedy == a
            matches_argmax_count += matches_argmax.int().sum()
            stochastic_draws += batch_size

            if log_g is not None:
                nodes.append(log_g)
            probs = probs + eps
            entropy += (- probs * probs.log()).sum(1).sum()
            # Flatten (batch, 1) -> (batch,) so the column assignment
            # broadcasts correctly under current torch.
            proposal[:, i] = a.view(-1)
        return nodes, proposal, entropy, matches_argmax_count, stochastic_draws
class AgentModel(nn.Module):
    """End-to-end negotiation agent: encodes the game context, the opponent's
    previous utterance and previous proposal, then emits termination,
    utterance and proposal actions plus an entropy-regularisation loss."""
    def __init__(
            self, enable_comms, enable_proposal,
            term_entropy_reg,
            utterance_entropy_reg,
            proposal_entropy_reg,
            embedding_size=100):
        super().__init__()
        # Per-head entropy regularisation weights.
        self.term_entropy_reg = term_entropy_reg
        self.utterance_entropy_reg = utterance_entropy_reg
        self.proposal_entropy_reg = proposal_entropy_reg
        self.embedding_size = embedding_size
        self.enable_comms = enable_comms
        self.enable_proposal = enable_proposal
        self.context_net = NumberSequenceEncoder(num_values=6)
        self.utterance_net = NumberSequenceEncoder(num_values=10)
        self.proposal_net = NumberSequenceEncoder(num_values=6)
        # Context and proposal encoders share one embedding table
        # (both consume item counts in 0..5).
        self.proposal_net.embedding = self.context_net.embedding

        self.combined_net = CombinedNet()

        self.term_policy = TermPolicy()
        self.utterance_policy = UtterancePolicy()
        self.proposal_policy = ProposalPolicy()

    def forward(self, pool, utility, m_prev, prev_proposal, testing):
        """
        setting testing to True disables stochasticity: always picks the argmax

        cannot use this when training
        """
        batch_size = pool.size()[0]
        # Context = item pool concatenated with this agent's utility vector.
        context = torch.cat([pool, utility], 1)
        c_h = self.context_net(context)

        type_constr = torch.cuda if context.is_cuda else torch
        if self.enable_comms:
            m_h = self.utterance_net(m_prev)
        else:
            # Comms disabled: a zero vector stands in for the utterance encoding.
            m_h = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))
        p_h = self.proposal_net(prev_proposal)

        h_t = torch.cat([c_h, m_h, p_h], -1)
        h_t = self.combined_net(h_t)

        entropy_loss = 0
        # Log-probability nodes of each stochastic draw (empty when testing).
        nodes = []

        term_probs, term_node, term_a, entropy, term_matches_argmax_count = self.term_policy(h_t, testing=testing)
        nodes.append(term_node)
        entropy_loss -= entropy * self.term_entropy_reg

        utterance = None
        if self.enable_comms:
            utterance_nodes, utterance, utterance_entropy, utt_matches_argmax_count, utt_stochastic_draws = self.utterance_policy(
                h_t, testing=testing)
            nodes += utterance_nodes
            entropy_loss -= self.utterance_entropy_reg * utterance_entropy
        else:
            utt_matches_argmax_count = 0
            utt_stochastic_draws = 0
            utterance = type_constr.LongTensor(batch_size, 6).zero_()  # hard-coding 6 here is a bit hacky...

        proposal_nodes, proposal, proposal_entropy, prop_matches_argmax_count, prop_stochastic_draws = self.proposal_policy(
            h_t, testing=testing)
        nodes += proposal_nodes
        entropy_loss -= self.proposal_entropy_reg * proposal_entropy

        return nodes, term_a, utterance, proposal, entropy_loss, \
            term_matches_argmax_count, utt_matches_argmax_count, utt_stochastic_draws, prop_matches_argmax_count, prop_stochastic_draws
| 36.503937 | 135 | 0.612058 | import torch
from torch import nn, autograd
from torch.autograd import Variable
import torch.nn.functional as F
class NumberSequenceEncoder(nn.Module):
def __init__(self, num_values, embedding_size=100):
super().__init__()
self.embedding_size = embedding_size
self.num_values = num_values
self.embedding = nn.Embedding(num_values, embedding_size)
self.lstm = nn.LSTMCell(
input_size=embedding_size,
hidden_size=embedding_size)
self.zero_state = None
def forward(self, x):
batch_size = x.size()[0]
seq_len = x.size()[1]
x = x.transpose(0, 1)
x = self.embedding(x)
type_constr = torch.cuda if x.is_cuda else torch
state = (
Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0)),
Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))
)
for s in range(seq_len):
state = self.lstm(x[s], state)
return state[0]
class CombinedNet(nn.Module):
def __init__(self, num_sources=3, embedding_size=100):
super().__init__()
self.embedding_size = embedding_size
self.h1 = nn.Linear(embedding_size * num_sources, embedding_size)
def forward(self, x):
x = self.h1(x)
x = F.relu(x)
return x
class TermPolicy(nn.Module):
def __init__(self, embedding_size=100):
super().__init__()
self.h1 = nn.Linear(embedding_size, 1)
def forward(self, thoughtvector, testing, eps=1e-8):
logits = self.h1(thoughtvector)
term_probs = F.sigmoid(logits)
matches_argmax_count = 0
res_greedy = (term_probs.data >= 0.5).view(-1, 1).float()
log_g = None
if not testing:
a = torch.bernoulli(term_probs)
g = a.detach() * term_probs + (1 - a.detach()) * (1 - term_probs)
log_g = g.log()
a = a.data
else:
a = res_greedy
matches_greedy = res_greedy == a
matches_greedy_count = matches_greedy.int().sum()
term_probs = term_probs + eps
entropy = - (term_probs * term_probs.log()).sum(1).sum()
return term_probs, log_g, a.byte(), entropy, matches_greedy_count
class UtterancePolicy(nn.Module):
def __init__(self, embedding_size=100, num_tokens=10, max_len=6):
super().__init__()
self.embedding_size = embedding_size
self.num_tokens = num_tokens
self.max_len = max_len
self.embedding = nn.Embedding(num_tokens, embedding_size)
self.lstm = nn.LSTMCell(
input_size=embedding_size,
hidden_size=embedding_size
)
self.h1 = nn.Linear(embedding_size, num_tokens)
def forward(self, h_t, testing, eps=1e-8):
batch_size = h_t.size()[0]
type_constr = torch.cuda if h_t.is_cuda else torch
h = h_t
c = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))
matches_argmax_count = 0
last_token = type_constr.LongTensor(batch_size).fill_(0)
utterance_nodes = []
type_constr = torch.cuda if h_t.is_cuda else torch
utterance = type_constr.LongTensor(batch_size, self.max_len).fill_(0)
entropy = 0
matches_argmax_count = 0
stochastic_draws_count = 0
for i in range(self.max_len):
embedded = self.embedding(Variable(last_token))
h, c = self.lstm(embedded, (h, c))
logits = self.h1(h)
probs = F.softmax(logits)
_, res_greedy = probs.data.max(1)
res_greedy = res_greedy.view(-1, 1).long()
log_g = None
if not testing:
a = torch.multinomial(probs)
g = torch.gather(probs, 1, Variable(a.data))
log_g = g.log()
a = a.data
else:
a = res_greedy
matches_argmax = res_greedy == a
matches_argmax_count += matches_argmax.int().sum()
stochastic_draws_count += batch_size
if log_g is not None:
utterance_nodes.append(log_g)
last_token = a.view(batch_size)
utterance[:, i] = last_token
probs = probs + eps
entropy -= (probs * probs.log()).sum(1).sum()
return utterance_nodes, utterance, entropy, matches_argmax_count, stochastic_draws_count
class ProposalPolicy(nn.Module):
def __init__(self, embedding_size=100, num_counts=6, num_items=3):
super().__init__()
self.num_counts = num_counts
self.num_items = num_items
self.embedding_size = embedding_size
self.fcs = []
for i in range(num_items):
fc = nn.Linear(embedding_size, num_counts)
self.fcs.append(fc)
self.__setattr__('h1_%s' % i, fc)
def forward(self, x, testing, eps=1e-8):
batch_size = x.size()[0]
nodes = []
entropy = 0
matches_argmax_count = 0
type_constr = torch.cuda if x.is_cuda else torch
matches_argmax_count = 0
stochastic_draws = 0
proposal = type_constr.LongTensor(batch_size, self.num_items).fill_(0)
for i in range(self.num_items):
logits = self.fcs[i](x)
probs = F.softmax(logits)
_, res_greedy = probs.data.max(1)
res_greedy = res_greedy.view(-1, 1).long()
log_g = None
if not testing:
a = torch.multinomial(probs)
g = torch.gather(probs, 1, Variable(a.data))
log_g = g.log()
a = a.data
else:
a = res_greedy
matches_argmax = res_greedy == a
matches_argmax_count += matches_argmax.int().sum()
stochastic_draws += batch_size
if log_g is not None:
nodes.append(log_g)
probs = probs + eps
entropy += (- probs * probs.log()).sum(1).sum()
proposal[:, i] = a
return nodes, proposal, entropy, matches_argmax_count, stochastic_draws
class AgentModel(nn.Module):
def __init__(
self, enable_comms, enable_proposal,
term_entropy_reg,
utterance_entropy_reg,
proposal_entropy_reg,
embedding_size=100):
super().__init__()
self.term_entropy_reg = term_entropy_reg
self.utterance_entropy_reg = utterance_entropy_reg
self.proposal_entropy_reg = proposal_entropy_reg
self.embedding_size = embedding_size
self.enable_comms = enable_comms
self.enable_proposal = enable_proposal
self.context_net = NumberSequenceEncoder(num_values=6)
self.utterance_net = NumberSequenceEncoder(num_values=10)
self.proposal_net = NumberSequenceEncoder(num_values=6)
self.proposal_net.embedding = self.context_net.embedding
self.combined_net = CombinedNet()
self.term_policy = TermPolicy()
self.utterance_policy = UtterancePolicy()
self.proposal_policy = ProposalPolicy()
def forward(self, pool, utility, m_prev, prev_proposal, testing):
batch_size = pool.size()[0]
context = torch.cat([pool, utility], 1)
c_h = self.context_net(context)
type_constr = torch.cuda if context.is_cuda else torch
if self.enable_comms:
m_h = self.utterance_net(m_prev)
else:
m_h = Variable(type_constr.FloatTensor(batch_size, self.embedding_size).fill_(0))
p_h = self.proposal_net(prev_proposal)
h_t = torch.cat([c_h, m_h, p_h], -1)
h_t = self.combined_net(h_t)
entropy_loss = 0
nodes = []
term_probs, term_node, term_a, entropy, term_matches_argmax_count = self.term_policy(h_t, testing=testing)
nodes.append(term_node)
entropy_loss -= entropy * self.term_entropy_reg
utterance = None
if self.enable_comms:
utterance_nodes, utterance, utterance_entropy, utt_matches_argmax_count, utt_stochastic_draws = self.utterance_policy(
h_t, testing=testing)
nodes += utterance_nodes
entropy_loss -= self.utterance_entropy_reg * utterance_entropy
else:
utt_matches_argmax_count = 0
utt_stochastic_draws = 0
utterance = type_constr.LongTensor(batch_size, 6).zero_()
proposal_nodes, proposal, proposal_entropy, prop_matches_argmax_count, prop_stochastic_draws = self.proposal_policy(
h_t, testing=testing)
nodes += proposal_nodes
entropy_loss -= self.proposal_entropy_reg * proposal_entropy
return nodes, term_a, utterance, proposal, entropy_loss, \
term_matches_argmax_count, utt_matches_argmax_count, utt_stochastic_draws, prop_matches_argmax_count, prop_stochastic_draws
| true | true |
1c330bb9ecd571857f9b054f2104c084f9a82eb4 | 7,983 | py | Python | Bag of Features/preprocessing_surf.py | varunsingh251/Indian-Sign-Language-Recognition | 07eb060f4c22821e69351b0931ebaadc2e9193a8 | [
"MIT"
] | 147 | 2017-12-08T19:52:18.000Z | 2022-02-23T04:40:02.000Z | Bag of Features/preprocessing_surf.py | hyper07/Indian-Sign-Language-Recognition | 40604e5c0a2a9b6310e26c0fa31b4f4f1c30de45 | [
"MIT"
] | 13 | 2018-01-26T19:21:34.000Z | 2021-11-15T11:53:30.000Z | Bag of Features/preprocessing_surf.py | hyper07/Indian-Sign-Language-Recognition | 40604e5c0a2a9b6310e26c0fa31b4f4f1c30de45 | [
"MIT"
] | 86 | 2017-11-20T10:23:08.000Z | 2022-03-30T14:39:36.000Z | import numpy as np
import cv2
import os
import csv
import sklearn.metrics as sm
from surf_image_processing import func,func2
from sklearn.cluster import MiniBatchKMeans
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
import random
import warnings
import pickle
from sklearn.naive_bayes import GaussianNB as nb
from sklearn.neighbors import KNeighborsClassifier as knn
from sklearn.linear_model import LogisticRegression as lr
from sklearn.neural_network import MLPClassifier as mlp
import numpy as np
import sklearn.metrics as sm
#initialise
path="train"
label=0
img_descs=[]
y=[]
#utility functions
def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):
    """Slice X and y into train / test / validation subsets.

    Parameters
    ----------
    X : array-like, e.g. the BoW histogram matrix (img_bow_hist)
    y : labels aligned row-for-row with X
    training_idxs, test_idxs, val_idxs : integer row indices for each split

    Returns
    -------
    X_train, X_test, X_val, y_train, y_test, y_val
    """
    splits = (training_idxs, test_idxs, val_idxs)
    X_train, X_test, X_val = (X[idxs] for idxs in splits)
    y_train, y_test, y_val = (y[idxs] for idxs in splits)
    return X_train, X_test, X_val, y_train, y_test, y_val
def train_test_val_split_idxs(total_rows, percent_test, percent_val):
    """Randomly partition the row indices [0, total_rows) into train/test/val.

    Parameters
    ----------
    total_rows : int
        Number of rows; indices are assumed to be the sequential integers
        0 .. total_rows-1.
    percent_test, percent_val : float
        Fractions (in [0, 1)) of rows to reserve for the test and validation
        splits; whatever remains becomes the training split.

    Returns
    -------
    training_idxs : list of int
    test_idxs, val_idxs : numpy arrays of int

    Raises
    ------
    ValueError
        If percent_test + percent_val >= 1.0 (no rows would remain for training).
    """
    if percent_test + percent_val >= 1.0:
        raise ValueError('percent_test and percent_val must sum to less than 1.0')

    row_range = range(total_rows)

    no_test_rows = int(total_rows * percent_test)
    test_idxs = np.random.choice(row_range, size=no_test_rows, replace=False)

    # Remove test indexes. Membership tests against a set are O(1); the
    # original tested `idx not in test_idxs` against the ndarray, which scans
    # the whole array per lookup (O(n^2) overall).
    test_set = set(test_idxs)
    row_range = [idx for idx in row_range if idx not in test_set]

    no_val_rows = int(total_rows * percent_val)
    val_idxs = np.random.choice(row_range, size=no_val_rows, replace=False)

    # Remove validation indexes; everything left over is the training split.
    val_set = set(val_idxs)
    training_idxs = [idx for idx in row_range if idx not in val_set]

    print('Train-test-val split: %i training rows, %i test rows, %i validation rows' % (len(training_idxs), len(test_idxs), len(val_idxs)))

    return training_idxs, test_idxs, val_idxs
def cluster_features(img_descs, training_idxs, cluster_model):
    """Fit ``cluster_model`` on the training descriptors and build BoW histograms.

    Parameters
    ----------
    img_descs : list of per-image descriptor arrays (e.g. SURF features)
    training_idxs : indices into img_descs selecting the training images
    cluster_model : scikit-learn style clusterer exposing
        ``n_clusters`` / ``fit`` / ``predict`` (e.g. MiniBatchKMeans)

    Returns
    -------
    X : array of shape (n_images, n_clusters); row i is the visual-word
        histogram for image i
    cluster_model : the same model, now fitted on the training descriptors
    """
    n_clusters = cluster_model.n_clusters

    # Stack every descriptor from the training images into one big matrix.
    all_train_descriptors = np.array([
        desc
        for i in training_idxs
        for desc in img_descs[i]
    ])
    print ('%i descriptors before clustering' % all_train_descriptors.shape[0])

    # Learn the visual-word codebook from the training descriptors only.
    print ('Using clustering model %s...' % repr(cluster_model))
    print ('Clustering on training set to get codebook of %i words' % n_clusters)
    cluster_model.fit(all_train_descriptors)
    print ('done clustering. Using clustering model to generate BoW histograms for each image.')

    # Map each image's descriptors to cluster ids, then histogram the ids so
    # every image becomes a fixed-length visual-word count vector.
    X = np.array([
        np.bincount(cluster_model.predict(raw_words), minlength=n_clusters)
        for raw_words in img_descs
    ])
    print ('done generating BoW histograms.')

    return X, cluster_model
def calc_accuracy(method, label_test, pred):
    """Print accuracy, precision, F1 and recall for one classifier's output.

    ``method`` is only a display name. Precision/F1/recall use micro
    averaging, matching the multi-class setting of this script.
    """
    metrics = (
        ("accuracy score for ", sm.accuracy_score(label_test, pred)),
        ("precision_score for ", sm.precision_score(label_test, pred, average='micro')),
        ("f1 score for ", sm.f1_score(label_test, pred, average='micro')),
        ("recall score for ", sm.recall_score(label_test, pred, average='micro')),
    )
    for prefix, value in metrics:
        print(prefix, method, value)
def predict_svm(X_train, X_test, y_train, y_test):
    """Train a linear-kernel SVM, report metrics and dump predictions to CSV."""
    print("svm started")
    model = SVC(kernel='linear')
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    calc_accuracy("SVM", y_test, predictions)
    # Columns: 1-based image id, predicted label, ground-truth label.
    rows = np.c_[range(1, len(y_test) + 1), predictions, y_test]
    np.savetxt('submission_surf_svm.csv', rows, delimiter=',', header='ImageId,Label,TrueLabel', comments='', fmt='%d')
def predict_lr(X_train, X_test, y_train, y_test):
    """Train a logistic-regression model, report metrics and dump predictions to CSV."""
    print("lr started")
    model = lr()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    calc_accuracy("Logistic regression", y_test, predictions)
    # Columns: 1-based image id, predicted label, ground-truth label.
    rows = np.c_[range(1, len(y_test) + 1), predictions, y_test]
    np.savetxt('submission_surf_lr.csv', rows, delimiter=',', header='ImageId,Label,TrueLabel', comments='', fmt='%d')
def predict_nb(X_train, X_test, y_train, y_test):
    """Train a Gaussian naive-Bayes model, report metrics and dump predictions to CSV."""
    print("nb started")
    model = nb()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    calc_accuracy("Naive Bayes", y_test, predictions)
    # Columns: 1-based image id, predicted label, ground-truth label.
    rows = np.c_[range(1, len(y_test) + 1), predictions, y_test]
    np.savetxt('submission_surf_nb.csv', rows, delimiter=',', header='ImageId,Label,TrueLabel', comments='', fmt='%d')
def predict_knn(X_train, X_test, y_train, y_test):
    """Train a 3-nearest-neighbours classifier, report metrics and dump predictions to CSV."""
    print("knn started")
    model = knn(n_neighbors=3)
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    calc_accuracy("K nearest neighbours", y_test, predictions)
    # Columns: 1-based image id, predicted label, ground-truth label.
    rows = np.c_[range(1, len(y_test) + 1), predictions, y_test]
    np.savetxt('submission_surf_knn.csv', rows, delimiter=',', header='ImageId,Label,TrueLabel', comments='', fmt='%d')
def predict_mlp(X_train, X_test, y_train, y_test):
    """Train an MLP classifier and report metrics.

    NOTE(review): unlike its siblings this one writes no submission CSV;
    kept as-is to preserve behavior.
    """
    print("mlp started")
    model = mlp()
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    calc_accuracy("MLP classifier", y_test, predictions)
# Build a descriptor list with one integer label per gesture-class directory.
# Each immediate subdirectory of `path` is one class; every file found under
# it (including nested subfolders) receives that class's label.
# next(os.walk(path))[1] yields only the top-level class directories; the
# original looped over os.walk(path) itself, which would also revisit nested
# subdirectories and assign them spurious extra labels.
for dirname in next(os.walk(path))[1]:
    print(dirname)
    for (direcpath, direcnames, files) in os.walk(os.path.join(path, dirname)):
        for file in files:
            # os.path.join is portable and, unlike the original hard-coded
            # "\\" concatenation, produces the correct path for files inside
            # nested subdirectories as well.
            actual_path = os.path.join(direcpath, file)
            print(actual_path)
            des = func(actual_path)
            img_descs.append(des)
            y.append(label)
    label = label + 1

# finding indexes of test, train and validate
y = np.array(y)
training_idxs, test_idxs, val_idxs = train_test_val_split_idxs(len(img_descs), 0.4, 0.0)

# creating histogram using kmeans minibatch cluster model
X, cluster_model = cluster_features(img_descs, training_idxs, MiniBatchKMeans(n_clusters=150))

# splitting data into test, train, validate using the indexes
X_train, X_test, X_val, y_train, y_test, y_val = perform_data_split(X, y, training_idxs, test_idxs, val_idxs)

# using classification methods
predict_knn(X_train, X_test, y_train, y_test)
# predict_mlp(X_train, X_test, y_train, y_test)
predict_svm(X_train, X_test, y_train, y_test)
predict_lr(X_train, X_test, y_train, y_test)
predict_nb(X_train, X_test, y_train, y_test)
| 36.619266 | 162 | 0.716523 | import numpy as np
import cv2
import os
import csv
import sklearn.metrics as sm
from surf_image_processing import func,func2
from sklearn.cluster import MiniBatchKMeans
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
import random
import warnings
import pickle
from sklearn.naive_bayes import GaussianNB as nb
from sklearn.neighbors import KNeighborsClassifier as knn
from sklearn.linear_model import LogisticRegression as lr
from sklearn.neural_network import MLPClassifier as mlp
import numpy as np
import sklearn.metrics as sm
path="train"
label=0
img_descs=[]
y=[]
def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):
X_train = X[training_idxs]
X_test = X[test_idxs]
X_val = X[val_idxs]
y_train = y[training_idxs]
y_test = y[test_idxs]
y_val = y[val_idxs]
return X_train, X_test, X_val, y_train, y_test, y_val
def train_test_val_split_idxs(total_rows, percent_test, percent_val):
if percent_test + percent_val >= 1.0:
raise ValueError('percent_test and percent_val must sum to less than 1.0')
row_range = range(total_rows)
no_test_rows = int(total_rows*(percent_test))
test_idxs = np.random.choice(row_range, size=no_test_rows, replace=False)
row_range = [idx for idx in row_range if idx not in test_idxs]
no_val_rows = int(total_rows*(percent_val))
val_idxs = np.random.choice(row_range, size=no_val_rows, replace=False)
training_idxs = [idx for idx in row_range if idx not in val_idxs]
print('Train-test-val split: %i training rows, %i test rows, %i validation rows' % (len(training_idxs), len(test_idxs), len(val_idxs)))
return training_idxs, test_idxs, val_idxs
def cluster_features(img_descs, training_idxs, cluster_model):
n_clusters = cluster_model.n_clusters
n training_idxs]
all_train_descriptors = [desc for desc_list in training_descs for desc in desc_list]
all_train_descriptors = np.array(all_train_descriptors)
print ('%i descriptors before clustering' % all_train_descriptors.shape[0])
print ('Using clustering model %s...' % repr(cluster_model))
print ('Clustering on training set to get codebook of %i words' % n_clusters)
cluster_model.fit(all_train_descriptors)
print ('done clustering. Using clustering model to generate BoW histograms for each image.')
img_clustered_words = [cluster_model.predict(raw_words) for raw_words in img_descs]
img_bow_hist = np.array(
[np.bincount(clustered_words, minlength=n_clusters) for clustered_words in img_clustered_words])
X = img_bow_hist
print ('done generating BoW histograms.')
return X, cluster_model
def calc_accuracy(method,label_test,pred):
print("accuracy score for ",method,sm.accuracy_score(label_test,pred))
print("precision_score for ",method,sm.precision_score(label_test,pred,average='micro'))
print("f1 score for ",method,sm.f1_score(label_test,pred,average='micro'))
print("recall score for ",method,sm.recall_score(label_test,pred,average='micro'))
def predict_svm(X_train, X_test, y_train, y_test):
svc=SVC(kernel='linear')
print("svm started")
svc.fit(X_train,y_train)
y_pred=svc.predict(X_test)
calc_accuracy("SVM",y_test,y_pred)
np.savetxt('submission_surf_svm.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')
def predict_lr(X_train, X_test, y_train, y_test):
clf = lr()
print("lr started")
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
calc_accuracy("Logistic regression",y_test,y_pred)
np.savetxt('submission_surf_lr.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')
def predict_nb(X_train, X_test, y_train, y_test):
clf = nb()
print("nb started")
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
calc_accuracy("Naive Bayes",y_test,y_pred)
np.savetxt('submission_surf_nb.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')
def predict_knn(X_train, X_test, y_train, y_test):
clf=knn(n_neighbors=3)
print("knn started")
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
calc_accuracy("K nearest neighbours",y_test,y_pred)
np.savetxt('submission_surf_knn.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,Label,TrueLabel', comments = '', fmt='%d')
def predict_mlp(X_train, X_test, y_train, y_test):
clf=mlp()
print("mlp started")
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
calc_accuracy("MLP classifier",y_test,y_pred)
for (dirpath,dirnames,filenames) in os.walk(path):
for dirname in dirnames:
print(dirname)
for(direcpath,direcnames,files) in os.walk(path+"\\"+dirname):
for file in files:
actual_path=path+"\\\\"+dirname+"\\\\"+file
print(actual_path)
des=func(actual_path)
img_descs.append(des)
y.append(label)
label=label+1
y=np.array(y)
training_idxs, test_idxs, val_idxs = train_test_val_split_idxs(len(img_descs), 0.4, 0.0)
X, cluster_model = cluster_features(img_descs, training_idxs, MiniBatchKMeans(n_clusters=150))
X_train, X_test, X_val, y_train, y_test, y_val = perform_data_split(X, y, training_idxs, test_idxs, val_idxs)
predict_knn(X_train, X_test,y_train, y_test)
predict_svm(X_train, X_test,y_train, y_test)
predict_lr(X_train, X_test,y_train, y_test)
predict_nb(X_train, X_test,y_train, y_test)
| true | true |
1c330bd99ebe226153876feb67b2d0e928a6c4c2 | 373 | py | Python | companies/migrations/0012_remove_company_is_listed.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | 1 | 2015-09-28T17:11:12.000Z | 2015-09-28T17:11:12.000Z | companies/migrations/0012_remove_company_is_listed.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | 4 | 2020-02-11T22:59:54.000Z | 2021-06-10T17:55:15.000Z | companies/migrations/0012_remove_company_is_listed.py | Valuehorizon/valuehorizon-companies | 5366e230da69ee30fcdc1bf4beddc99310f6b767 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: drops the obsolete
    # ``is_listed`` boolean field from the ``Company`` model.
    # Generated code — keep byte-stable; do not hand-edit the operations.
    dependencies = [
        ('companies', '0011_company_latest_incorporation_date'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='company',
            name='is_listed',
        ),
    ]
| 19.631579 | 64 | 0.624665 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companies', '0011_company_latest_incorporation_date'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='is_listed',
),
]
| true | true |
1c330c30ecf527114842ab07626b84a090588d9a | 481 | py | Python | sample_dashboard/sample_dashboard/urls.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | sample_dashboard/sample_dashboard/urls.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | sample_dashboard/sample_dashboard/urls.py | Georgitanev/django_dashboard | 5be2680712fcedf33965fd66a69d46e1e32ab04b | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.views.generic import RedirectView
# URL routes: the Django admin, the google_analytics app, and a permanent
# redirect from the site root into the analytics dashboard.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("google_analytics/", include("google_analytics.urls")),
    path("", RedirectView.as_view(url="/google_analytics/", permanent=True)),
]
# Append the static-file routes (static() returns an empty list when the
# project is not configured to serve statics itself, e.g. DEBUG=False).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 37 | 77 | 0.777547 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include
from django.urls import path
from django.views.generic import RedirectView
urlpatterns = [
path("admin/", admin.site.urls),
path("google_analytics/", include("google_analytics.urls")),
path("", RedirectView.as_view(url="/google_analytics/", permanent=True)),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| true | true |
1c330c5d095231e2570eaac069980322ec14776b | 8,534 | py | Python | releasenotes/source/conf.py | openstack/puppet-rally | f6521d64b11bca1da14a6f7848ba82284b348299 | [
"Apache-2.0"
] | 18 | 2016-01-03T07:26:56.000Z | 2019-04-12T09:22:39.000Z | releasenotes/source/conf.py | openstack/puppet-rally | f6521d64b11bca1da14a6f7848ba82284b348299 | [
"Apache-2.0"
] | null | null | null | releasenotes/source/conf.py | openstack/puppet-rally | f6521d64b11bca1da14a6f7848ba82284b348299 | [
"Apache-2.0"
] | 2 | 2016-01-21T07:39:32.000Z | 2017-06-15T08:18:00.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used to build the release notes.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Source file suffix and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
copyright = u'2016, Puppet OpenStack Developers'

# Release notes are continuously versioned by reno; leave both fields empty.
version = ''
release = ''

# Patterns (relative to the source dir) to ignore when looking for sources.
exclude_patterns = []

# Pygments (syntax highlighting) style.
pygments_style = 'native'

# -- Options for HTML output --------------------------------------------------

html_theme = 'openstackdocs'
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'puppet-rallyReleaseNotesdoc'

# -- Options for LaTeX output -------------------------------------------------

# Tuples of (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'puppet-rallyReleaseNotes.tex', u'puppet-rally Release Notes Documentation',
     u'2016, Puppet OpenStack Developers', 'manual'),
]

# -- Options for manual page output -------------------------------------------

# Tuples of (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'puppet-rallyreleasenotes', u'puppet-rally Release Notes Documentation',
     [u'2016, Puppet OpenStack Developers'], 1)
]

# -- Options for Texinfo output -----------------------------------------------

# Tuples of (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    ('index', 'puppet-rallyReleaseNotes', u'puppet-rally Release Notes Documentation',
     u'2016, Puppet OpenStack Developers', 'puppet-rallyReleaseNotes', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for internationalization output ----------------------------------

locale_dirs = ['locale/']

# -- openstackdocstheme options ------------------------------------------------

openstackdocs_repo_name = 'openstack/puppet-rally'
openstackdocs_bug_project = 'puppet-rally'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
| 33.206226 | 104 | 0.724045 |
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
copyright = u'2016, Puppet OpenStack Developers'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'puppet-rallyReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'puppet-rallyReleaseNotes.tex', u'puppet-rally Release Notes Documentation',
u'2016, Puppet OpenStack Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'puppet-rallyreleasenotes', u'puppet-rally Release Notes Documentation',
[u'2016, Puppet OpenStack Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'puppet-rallyReleaseNotes', u'puppet-rally Release Notes Documentation',
u'2016, Puppet OpenStack Developers', 'puppet-rallyReleaseNotes', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
locale_dirs = ['locale/']
openstackdocs_repo_name = 'openstack/puppet-rally'
openstackdocs_bug_project = 'puppet-rally'
openstackdocs_bug_tag = ''
openstackdocs_auto_name = False
| true | true |
1c330c9e85e20a00de1d955a4dbf54c1475c417e | 263 | py | Python | calculator_functions.py | simeckova/special_button_clicker | c49de7b8d18d30327d48ad782e1f2c92e9717da9 | [
"MIT"
] | null | null | null | calculator_functions.py | simeckova/special_button_clicker | c49de7b8d18d30327d48ad782e1f2c92e9717da9 | [
"MIT"
] | null | null | null | calculator_functions.py | simeckova/special_button_clicker | c49de7b8d18d30327d48ad782e1f2c92e9717da9 | [
"MIT"
] | null | null | null | def times_2(n: float) -> float:
return n * 2
def minus_7(n: float) -> float:
return n - 7
def plus_13(n: float) -> float:
return n + 13
def divide_by_3(n: float) -> float:
return n // 3
functions = [times_2, minus_7, plus_13, divide_by_3]
| 14.611111 | 52 | 0.61597 | def times_2(n: float) -> float:
return n * 2
def minus_7(n: float) -> float:
return n - 7
def plus_13(n: float) -> float:
return n + 13
def divide_by_3(n: float) -> float:
return n // 3
functions = [times_2, minus_7, plus_13, divide_by_3]
| true | true |
1c330ceb4e2f4e081fbe6fa36dd495cd2737e107 | 6,686 | py | Python | homeassistant/components/ecobee/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/ecobee/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/ecobee/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for displaying weather info from Ecobee API."""
from __future__ import annotations
from datetime import timedelta
from pyecobee.const import ECOBEE_STATE_UNKNOWN
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PRESSURE_HPA, PRESSURE_INHG, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import dt as dt_util
from homeassistant.util.pressure import convert as pressure_convert
from .const import (
DOMAIN,
ECOBEE_MODEL_TO_NAME,
ECOBEE_WEATHER_SYMBOL_TO_HASS,
MANUFACTURER,
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the ecobee weather platform."""
    data = hass.data[DOMAIN]
    entities = []
    for idx in range(len(data.ecobee.thermostats)):
        stat = data.ecobee.get_thermostat(idx)
        # Only thermostats that expose a "weather" section get an entity.
        if "weather" not in stat:
            continue
        entities.append(EcobeeWeather(data, stat["name"], idx))
    async_add_entities(entities, True)
class EcobeeWeather(WeatherEntity):
    """Representation of Ecobee weather data.

    Wraps the ``weather`` section of a single ecobee thermostat as a
    Home Assistant weather entity.
    """
    def __init__(self, data, name, index):
        """Initialize the Ecobee weather platform.

        data: shared ecobee data object (exposes ``.ecobee`` client).
        name: thermostat display name, used as the entity name.
        index: thermostat index within the ecobee account.
        """
        self.data = data
        self._name = name
        self._index = index
        # Raw "weather" dict for this thermostat; populated by async_update().
        self.weather = None
    def get_forecast(self, index, param):
        """Retrieve forecast parameter.

        Normalizes lookup failures (IndexError/KeyError) to ValueError so
        every property getter below only needs to catch one exception type.
        """
        try:
            forecast = self.weather["forecasts"][index]
            return forecast[param]
        except (IndexError, KeyError) as err:
            raise ValueError from err
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def unique_id(self):
        """Return a unique identifier for the weather platform."""
        return self.data.ecobee.get_thermostat(self._index)["identifier"]
    @property
    def device_info(self) -> DeviceInfo:
        """Return device information for the ecobee weather platform."""
        thermostat = self.data.ecobee.get_thermostat(self._index)
        model: str | None
        try:
            model = f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
        except KeyError:
            # Ecobee model is not in our list; omit the model rather than fail.
            model = None
        return DeviceInfo(
            identifiers={(DOMAIN, thermostat["identifier"])},
            manufacturer=MANUFACTURER,
            model=model,
            name=self.name,
        )
    @property
    def condition(self):
        """Return the current condition, or None if unavailable."""
        try:
            return ECOBEE_WEATHER_SYMBOL_TO_HASS[self.get_forecast(0, "weatherSymbol")]
        except ValueError:
            return None
    @property
    def temperature(self):
        """Return the temperature (API reports tenths of a degree)."""
        try:
            return float(self.get_forecast(0, "temperature")) / 10
        except ValueError:
            return None
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT
    @property
    def pressure(self):
        """Return the pressure."""
        try:
            pressure = self.get_forecast(0, "pressure")
            if not self.hass.config.units.is_metric:
                # API value is hPa; convert to inHg for imperial installs.
                pressure = pressure_convert(pressure, PRESSURE_HPA, PRESSURE_INHG)
                return round(pressure, 2)
            return round(pressure)
        except ValueError:
            return None
    @property
    def humidity(self):
        """Return the humidity."""
        try:
            return int(self.get_forecast(0, "relativeHumidity"))
        except ValueError:
            return None
    @property
    def visibility(self):
        """Return the visibility."""
        # Divides by 1000 — presumably meters -> kilometers; verify with API docs.
        try:
            return int(self.get_forecast(0, "visibility")) / 1000
        except ValueError:
            return None
    @property
    def wind_speed(self):
        """Return the wind speed."""
        try:
            return int(self.get_forecast(0, "windSpeed"))
        except ValueError:
            return None
    @property
    def wind_bearing(self):
        """Return the wind direction."""
        try:
            return int(self.get_forecast(0, "windBearing"))
        except ValueError:
            return None
    @property
    def attribution(self):
        """Return the attribution string, or None before first update."""
        if not self.weather:
            return None
        station = self.weather.get("weatherStation", "UNKNOWN")
        time = self.weather.get("timestamp", "UNKNOWN")
        return f"Ecobee weather provided by {station} at {time} UTC"
    @property
    def forecast(self):
        """Return the forecast array."""
        if "forecasts" not in self.weather:
            return None
        forecasts = []
        # API entries carry no dates; synthesize consecutive days from now (UTC).
        date = dt_util.utcnow()
        # NOTE(review): indexes entries 0..4 unguarded — assumes at least 5
        # forecast entries; an IndexError here would propagate.
        for day in range(0, 5):
            forecast = _process_forecast(self.weather["forecasts"][day])
            if forecast is None:
                continue
            forecast[ATTR_FORECAST_TIME] = date.isoformat()
            date += timedelta(days=1)
            forecasts.append(forecast)
        if forecasts:
            return forecasts
        return None
    async def async_update(self):
        """Get the latest weather data."""
        await self.data.update()
        thermostat = self.data.ecobee.get_thermostat(self._index)
        self.weather = thermostat.get("weather")
def _process_forecast(json):
    """Process a single ecobee API forecast to return expected values.

    Returns a dict keyed by the HA forecast attribute constants, or None
    when the entry is malformed or empty.
    """
    try:
        # Condition is mandatory; an unmapped symbol aborts via KeyError.
        result = {
            ATTR_FORECAST_CONDITION: ECOBEE_WEATHER_SYMBOL_TO_HASS[
                json["weatherSymbol"]
            ]
        }
        # Temperatures arrive scaled by 10; skip unknown sentinels entirely.
        if json["tempHigh"] != ECOBEE_STATE_UNKNOWN:
            result[ATTR_FORECAST_TEMP] = float(json["tempHigh"]) / 10
        if json["tempLow"] != ECOBEE_STATE_UNKNOWN:
            result[ATTR_FORECAST_TEMP_LOW] = float(json["tempLow"]) / 10
        if json["windBearing"] != ECOBEE_STATE_UNKNOWN:
            result[ATTR_FORECAST_WIND_BEARING] = int(json["windBearing"])
        if json["windSpeed"] != ECOBEE_STATE_UNKNOWN:
            result[ATTR_FORECAST_WIND_SPEED] = int(json["windSpeed"])
    except (ValueError, IndexError, KeyError):
        # Malformed entry: treat the whole day as unavailable.
        return None
    return result or None
| 30.52968 | 87 | 0.630123 | from __future__ import annotations
from datetime import timedelta
from pyecobee.const import ECOBEE_STATE_UNKNOWN
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PRESSURE_HPA, PRESSURE_INHG, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import dt as dt_util
from homeassistant.util.pressure import convert as pressure_convert
from .const import (
DOMAIN,
ECOBEE_MODEL_TO_NAME,
ECOBEE_WEATHER_SYMBOL_TO_HASS,
MANUFACTURER,
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the ecobee weather platform from a config entry."""
    data = hass.data[DOMAIN]
    dev = []
    for index in range(len(data.ecobee.thermostats)):
        thermostat = data.ecobee.get_thermostat(index)
        # Only thermostats that expose a "weather" section get an entity.
        if "weather" in thermostat:
            dev.append(EcobeeWeather(data, thermostat["name"], index))
    async_add_entities(dev, True)
class EcobeeWeather(WeatherEntity):
    """Weather entity wrapping one ecobee thermostat's "weather" data."""
    def __init__(self, data, name, index):
        """Store the shared data object, entity name, and thermostat index."""
        self.data = data
        self._name = name
        self._index = index
        # Raw "weather" dict; populated by async_update().
        self.weather = None
    def get_forecast(self, index, param):
        """Return one forecast field, raising ValueError on any lookup failure."""
        try:
            forecast = self.weather["forecasts"][index]
            return forecast[param]
        except (IndexError, KeyError) as err:
            # Normalize to ValueError so callers catch a single type.
            raise ValueError from err
    @property
    def name(self):
        """Return the entity name."""
        return self._name
    @property
    def unique_id(self):
        """Return the thermostat identifier as the unique ID."""
        return self.data.ecobee.get_thermostat(self._index)["identifier"]
    @property
    def device_info(self) -> DeviceInfo:
        """Return device registry information for this thermostat."""
        thermostat = self.data.ecobee.get_thermostat(self._index)
        model: str | None
        try:
            model = f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
        except KeyError:
            # Unknown model number: omit the model rather than fail.
            model = None
        return DeviceInfo(
            identifiers={(DOMAIN, thermostat["identifier"])},
            manufacturer=MANUFACTURER,
            model=model,
            name=self.name,
        )
    @property
    def condition(self):
        """Return the mapped current condition, or None if unavailable."""
        try:
            return ECOBEE_WEATHER_SYMBOL_TO_HASS[self.get_forecast(0, "weatherSymbol")]
        except ValueError:
            return None
    @property
    def temperature(self):
        """Return the temperature (API reports tenths of a degree)."""
        try:
            return float(self.get_forecast(0, "temperature")) / 10
        except ValueError:
            return None
    @property
    def temperature_unit(self):
        """Return the native temperature unit."""
        return TEMP_FAHRENHEIT
    @property
    def pressure(self):
        """Return the pressure, converted to inHg on non-metric installs."""
        try:
            pressure = self.get_forecast(0, "pressure")
            if not self.hass.config.units.is_metric:
                # API value is hPa; convert for imperial display.
                pressure = pressure_convert(pressure, PRESSURE_HPA, PRESSURE_INHG)
                return round(pressure, 2)
            return round(pressure)
        except ValueError:
            return None
    @property
    def humidity(self):
        """Return the relative humidity."""
        try:
            return int(self.get_forecast(0, "relativeHumidity"))
        except ValueError:
            return None
    @property
    def visibility(self):
        """Return the visibility (divided by 1000 — presumably m -> km)."""
        try:
            return int(self.get_forecast(0, "visibility")) / 1000
        except ValueError:
            return None
    @property
    def wind_speed(self):
        """Return the wind speed."""
        try:
            return int(self.get_forecast(0, "windSpeed"))
        except ValueError:
            return None
    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        try:
            return int(self.get_forecast(0, "windBearing"))
        except ValueError:
            return None
    @property
    def attribution(self):
        """Return attribution text, or None before the first update."""
        if not self.weather:
            return None
        station = self.weather.get("weatherStation", "UNKNOWN")
        time = self.weather.get("timestamp", "UNKNOWN")
        return f"Ecobee weather provided by {station} at {time} UTC"
    @property
    def forecast(self):
        """Return a 5-day forecast list, or None when no data is usable."""
        if "forecasts" not in self.weather:
            return None
        forecasts = []
        # Entries carry no dates; synthesize consecutive days from now (UTC).
        date = dt_util.utcnow()
        for day in range(0, 5):
            forecast = _process_forecast(self.weather["forecasts"][day])
            if forecast is None:
                continue
            forecast[ATTR_FORECAST_TIME] = date.isoformat()
            date += timedelta(days=1)
            forecasts.append(forecast)
        if forecasts:
            return forecasts
        return None
    async def async_update(self):
        """Refresh shared data and cache this thermostat's weather dict."""
        await self.data.update()
        thermostat = self.data.ecobee.get_thermostat(self._index)
        self.weather = thermostat.get("weather")
def _process_forecast(json):
    """Map one raw ecobee forecast entry onto HA forecast keys, or None."""
    forecast = {}
    try:
        # Condition is mandatory; an unmapped symbol aborts via KeyError.
        forecast[ATTR_FORECAST_CONDITION] = ECOBEE_WEATHER_SYMBOL_TO_HASS[
            json["weatherSymbol"]
        ]
        # Remaining fields are optional: skip the API's "unknown" sentinel.
        if json["tempHigh"] != ECOBEE_STATE_UNKNOWN:
            forecast[ATTR_FORECAST_TEMP] = float(json["tempHigh"]) / 10
        if json["tempLow"] != ECOBEE_STATE_UNKNOWN:
            forecast[ATTR_FORECAST_TEMP_LOW] = float(json["tempLow"]) / 10
        if json["windBearing"] != ECOBEE_STATE_UNKNOWN:
            forecast[ATTR_FORECAST_WIND_BEARING] = int(json["windBearing"])
        if json["windSpeed"] != ECOBEE_STATE_UNKNOWN:
            forecast[ATTR_FORECAST_WIND_SPEED] = int(json["windSpeed"])
    except (ValueError, IndexError, KeyError):
        # Malformed entry: treat the whole day as unavailable.
        return None
    if forecast:
        return forecast
    return None
| true | true |
1c330d018b9287d66eea4498799152cb017ab675 | 928 | py | Python | website/orders/urls.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | 1 | 2020-05-10T21:10:43.000Z | 2020-05-10T21:10:43.000Z | website/orders/urls.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | 158 | 2020-05-04T12:37:41.000Z | 2022-03-31T20:15:07.000Z | website/orders/urls.py | KiOui/TOSTI | 72e65889c193727dcf3e3716b10c78a9774e1136 | [
"MIT"
] | null | null | null | from django.urls import path, register_converter
from orders import views
from .converters import ShiftConverter, OrderVenueConverter, OrderConverter
register_converter(ShiftConverter, "shift")
register_converter(OrderVenueConverter, "order_venue")
register_converter(OrderConverter, "order")
urlpatterns = [
path("explainer/", views.ExplainerView.as_view(), name="explainer"),
path("explainer-admin/", views.AdminExplainerView.as_view(), name="explainer_admin"),
path("<shift:shift>/order-items/", views.PlaceOrderView.as_view(), name="order"),
path("venue/<order_venue:venue>/create/", views.CreateShiftView.as_view(), name="shift_create"),
path("<shift:shift>/admin/", views.ShiftAdminView.as_view(), name="shift_admin"),
path("<shift:shift>/overview/", views.ShiftOverviewView.as_view(), name="shift_overview"),
path("<shift:shift>/join/", views.JoinShiftView.as_view(), name="shift_join"),
]
| 48.842105 | 100 | 0.75431 | from django.urls import path, register_converter
from orders import views
from .converters import ShiftConverter, OrderVenueConverter, OrderConverter
register_converter(ShiftConverter, "shift")
register_converter(OrderVenueConverter, "order_venue")
register_converter(OrderConverter, "order")
urlpatterns = [
path("explainer/", views.ExplainerView.as_view(), name="explainer"),
path("explainer-admin/", views.AdminExplainerView.as_view(), name="explainer_admin"),
path("<shift:shift>/order-items/", views.PlaceOrderView.as_view(), name="order"),
path("venue/<order_venue:venue>/create/", views.CreateShiftView.as_view(), name="shift_create"),
path("<shift:shift>/admin/", views.ShiftAdminView.as_view(), name="shift_admin"),
path("<shift:shift>/overview/", views.ShiftOverviewView.as_view(), name="shift_overview"),
path("<shift:shift>/join/", views.JoinShiftView.as_view(), name="shift_join"),
]
| true | true |
1c330d019b36bb4f9d251d15e0a2a2e7de7abdc7 | 3,956 | py | Python | paas-ce/paas/esb/components/bk/apisv2/bk_paas/create_app.py | renmcc/bk-PaaS | 1c9e4e9cfb40fc3375cd6b5f08af8c84203de246 | [
"Apache-2.0"
] | 767 | 2019-03-25T06:35:43.000Z | 2022-03-30T08:57:51.000Z | paas-ce/paas/esb/components/bk/apisv2/bk_paas/create_app.py | renmcc/bk-PaaS | 1c9e4e9cfb40fc3375cd6b5f08af8c84203de246 | [
"Apache-2.0"
] | 194 | 2019-03-29T07:16:41.000Z | 2022-03-30T06:17:49.000Z | paas-ce/paas/esb/components/bk/apisv2/bk_paas/create_app.py | renmcc/bk-PaaS | 1c9e4e9cfb40fc3375cd6b5f08af8c84203de246 | [
"Apache-2.0"
] | 381 | 2019-03-25T07:19:54.000Z | 2022-03-29T03:22:42.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import json
from django import forms
from components.component import Component
from common.forms import BaseComponentForm, ListField
from common.constants import API_TYPE_OP
from .toolkit import tools, configs
class CreateApp(Component):
    """
    apiLabel {{ _("创建轻应用") }}
    apiMethod POST
    ### {{ _("功能描述") }}
    {{ _("创建轻应用") }}
    ### {{ _("请求参数") }}
    {{ common_args_desc }}
    #### {{ _("接口参数") }}
    | {{ _("字段") }} | {{ _("类型") }} | {{ _("必选") }} | {{ _("描述") }} |
    |-----------|------------|--------|------------|
    | bk_light_app_name | string | {{ _("是") }} | {{ _("轻应用名称") }} |
    | app_url | string | {{ _("是") }} | {{ _("应用链接") }} |
    | developer | string | {{ _("是") }} | {{ _("应用开发者用户名,多个以分号';'分隔") }} |
    | app_tag | string | {{ _("否") }} | {{ _('应用分类,可选分类: "OpsTools"(运维工具),"MonitorAlarm"(监控告警),"ConfManage"(配置管理),"DevTools"(开发工具),"EnterpriseIT"(企业IT),"OfficeApp"(办公应用),"Other"(其它)。如果传入空参数或不是上诉分类,则使用 "Other"') }} |
    | introduction | string | {{ _("否") }} | {{ _("应用的简介") }} |
    | width | int | {{ _("否") }} | {{ _("应用在桌面打开窗口宽度") }} |
    | height | int | {{ _("否") }} | {{ _("应用在桌面打开窗口高度") }} |
    ### {{ _("请求参数示例") }}
    ```python
    {
        "bk_app_code": "gcloud",
        "bk_app_secret": "xxx",
        "bk_token": "xxx",
        "bk_light_app_name": "轻应用测试",
        "app_url": "http://test.bking.com/o/gcloud/xxx/",
        "developer": "test1;test2",
        "introduction": "introduction",
        "width": 1024,
        "height": 768
    }
    ```
    ### {{ _("返回结果示例") }}
    ```python
    {
        "result": true,
        "code": 0,
        "message": "",
        "data": {
            "bk_light_app_code": "gcloud_fdfh2kl0k"
        }
    }
    ```
    """  # noqa
    # NOTE: the docstring above is templated API documentation rendered by the
    # ESB framework at runtime — do not edit it for style reasons.
    sys_name = configs.SYSTEM_NAME
    api_type = API_TYPE_OP
    host = configs.host
    class Form(BaseComponentForm):
        # Declarative validation of the incoming request parameters.
        bk_light_app_name = forms.CharField(label='bk light app name', required=True)
        app_url = forms.CharField(label='app url', required=True)
        developer = ListField(label='developer', required=True)
        app_tag = forms.CharField(label='app tag', required=False)
        introduction = forms.CharField(label='introduction', required=False)
        width = forms.IntegerField(label='width', required=False)
        height = forms.IntegerField(label='height', required=False)
        def clean(self):
            # Keep only the declared keys that were actually provided, then
            # flatten the developer list into the ';'-separated string the
            # downstream PaaS API expects.
            param_keys = [
                'bk_light_app_name', 'app_url', 'developer',
                'app_tag', 'introduction', 'width', 'height'
            ]
            params = self.get_cleaned_data_when_exist(param_keys)
            params['developer'] = ';'.join(params['developer'])
            return params
    def handle(self):
        # Creator and app code come from the authenticated request context,
        # not from user-supplied form data.
        self.form_data['creator'] = self.current_user.username
        self.form_data['bk_app_code'] = self.request.app_code
        client = tools.PAASClient(self.outgoing.http_client)
        # POST the cleaned payload to the PaaS create_app endpoint; the raw
        # response becomes this component's payload.
        self.response.payload = client.post(
            host=self.host,
            path='/paas/api/v2/create_app/',
            data=json.dumps(self.form_data)
        )
| 36.62963 | 305 | 0.566734 |
import json
from django import forms
from components.component import Component
from common.forms import BaseComponentForm, ListField
from common.constants import API_TYPE_OP
from .toolkit import tools, configs
class CreateApp(Component):
    # ESB component: creates a BlueKing "light app" via the PaaS v2 API.
    # (No class docstring added on purpose — this framework renders component
    # docstrings as public API documentation.)
    sys_name = configs.SYSTEM_NAME
    api_type = API_TYPE_OP
    host = configs.host
    class Form(BaseComponentForm):
        # Declarative validation of the incoming request parameters.
        bk_light_app_name = forms.CharField(label='bk light app name', required=True)
        app_url = forms.CharField(label='app url', required=True)
        developer = ListField(label='developer', required=True)
        app_tag = forms.CharField(label='app tag', required=False)
        introduction = forms.CharField(label='introduction', required=False)
        width = forms.IntegerField(label='width', required=False)
        height = forms.IntegerField(label='height', required=False)
        def clean(self):
            # Keep only the declared keys that were actually provided, then
            # flatten the developer list into the ';'-separated string the
            # downstream PaaS API expects.
            param_keys = [
                'bk_light_app_name', 'app_url', 'developer',
                'app_tag', 'introduction', 'width', 'height'
            ]
            params = self.get_cleaned_data_when_exist(param_keys)
            params['developer'] = ';'.join(params['developer'])
            return params
    def handle(self):
        # Creator and app code come from the authenticated request context,
        # not from user-supplied form data.
        self.form_data['creator'] = self.current_user.username
        self.form_data['bk_app_code'] = self.request.app_code
        client = tools.PAASClient(self.outgoing.http_client)
        # POST the cleaned payload to the PaaS create_app endpoint.
        self.response.payload = client.post(
            host=self.host,
            path='/paas/api/v2/create_app/',
            data=json.dumps(self.form_data)
        )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.