| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4–996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 ⌀ |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 ⌀ |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 ⌀ |
| content | string | length 4–996k |
| avg_line_length | float64 | 1.33–58.2k |
| max_line_length | int64 | 2–323k |
| alphanum_fraction | float64 | 0–0.97 |
| content_no_comment | string | length 0–946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

(⌀ marks columns that contain null values.)
7906f7e2ae924545b9c80affa583f7faa97cbcdd
| 630
|
py
|
Python
|
setup.py
|
hopelife/mp_sync
|
d059c7983d7d92182e6b38d6efba473440bdf0d2
|
[
"MIT"
] | null | null | null |
setup.py
|
hopelife/mp_sync
|
d059c7983d7d92182e6b38d6efba473440bdf0d2
|
[
"MIT"
] | null | null | null |
setup.py
|
hopelife/mp_sync
|
d059c7983d7d92182e6b38d6efba473440bdf0d2
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import mp_sync
setup(
name='mp_sync',
version=mp_sync.__version__,
description='Moon Package for Sync repository(google drive, notion, mongodb(local/web), local file)',
url='https://github.com/hopelife/mp_sync',
author='Moon Jung Sam',
author_email='monblue@snu.ac.kr',
license='MIT',
packages=['mp_sync'],
# entry_points={'console_scripts': ['mp_sync = mp_sync.__main__:main']},
keywords='scraper',
# python_requires='>=3.8', # Python 3.8.6-32 bit
# install_requires=[ # additional packages required to use this package
# 'selenium',
# ],
# zip_safe=False
)
| 30
| 105
| 0.655556
|
from setuptools import setup
import mp_sync
setup(
name='mp_sync',
version=mp_sync.__version__,
description='Moon Package for Sync repository(google drive, notion, mongodb(local/web), local file)',
url='https://github.com/hopelife/mp_sync',
author='Moon Jung Sam',
author_email='monblue@snu.ac.kr',
license='MIT',
packages=['mp_sync'],
keywords='scraper',
| true
| true
|
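The `setup.py` above imports `mp_sync` at build time just to read `mp_sync.__version__`, which only works if the package's `__init__.py` is importable before its runtime dependencies are installed. A minimal sketch of that single-sourcing pattern, with an assumed placeholder version:

```python
# mp_sync/__init__.py -- hypothetical sketch, not the real file.
# setup.py imports this module before the package is installed, so it
# must not import heavy runtime dependencies at module level.
__version__ = "0.1.0"  # placeholder; the actual package defines its own value
```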
7906f83b320bda4c22c8898d12a63f535e7743d5
| 605
|
py
|
Python
|
setup.py
|
bneurd/bcpy
|
f52b64d3206c38f3131e91b4067a35765991891e
|
[
"MIT"
] | 2
|
2019-05-08T17:35:55.000Z
|
2020-03-06T18:23:40.000Z
|
setup.py
|
igornfaustino/bcpy
|
f52b64d3206c38f3131e91b4067a35765991891e
|
[
"MIT"
] | 17
|
2019-07-17T01:36:15.000Z
|
2020-05-02T13:22:27.000Z
|
setup.py
|
bneurd/bcpy
|
f52b64d3206c38f3131e91b4067a35765991891e
|
[
"MIT"
] | 1
|
2019-05-08T17:38:35.000Z
|
2019-05-08T17:38:35.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='bcpy',
version='0.1',
author='Igor Neves Faustino',
author_email='igornfaustino@gmail.com',
url='https://github.com/igornfaustino/bcpy.git',
description='library for BCI signal analysis',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=find_packages(),
# entry_points={
# 'console_scripts': ['forecastio = displayforecastio.app:run'],
# }
)
| 26.304348
| 72
| 0.682645
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='bcpy',
version='0.1',
author='Igor Neves Faustino',
author_email='igornfaustino@gmail.com',
url='https://github.com/igornfaustino/bcpy.git',
description='library for BCI signal analysis',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
packages=find_packages(),
)
| true
| true
|
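One portability note on the `long_description` read in the file above: `open("README.md", "r")` decodes with the locale's default encoding, so a non-ASCII README can fail on systems where that default is not UTF-8 (for example cp1252 on Windows). A hedged variant that pins the encoding:

```python
# Sketch: pin the encoding so the README decodes identically everywhere.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
```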
7906fc2c836240a009830a016f636644f00d7e9f
| 1,754
|
py
|
Python
|
urduhack/tokenization/wtk.py
|
cinfotech94/urduhackk
|
44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11
|
[
"MIT"
] | 252
|
2018-08-20T16:16:45.000Z
|
2022-03-04T07:03:58.000Z
|
urduhack/tokenization/wtk.py
|
cinfotech94/urduhackk
|
44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11
|
[
"MIT"
] | 111
|
2019-01-21T11:39:45.000Z
|
2021-09-30T07:26:50.000Z
|
urduhack/tokenization/wtk.py
|
cinfotech94/urduhackk
|
44500cd6a78e1a7765bb4f7d6fb92bbb612b7b11
|
[
"MIT"
] | 35
|
2019-02-09T14:29:36.000Z
|
2022-01-09T10:02:56.000Z
|
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
Loads the pre-trained sentencepiece model from the given file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
Check if the models file exist.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
| 28.290323
| 102
| 0.641391
|
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
| true
| true
|
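A hedged usage sketch for the three helpers in the file above. The model path is an assumption (urduhack normally fetches its model via `urduhack download`), and `EncodeAsPieces` is the standard sentencepiece call that yields the `▁`-prefixed pieces `_is_token` expects:

```python
import sentencepiece as spm  # the helpers _is_model_available, _load_model,
                             # and _is_token are assumed in scope from above

model_path = "models/word_tokenizer.model"    # hypothetical location
_is_model_available(model_path)               # raises FileNotFoundError if absent
sp = _load_model(model_path)
pieces = sp.EncodeAsPieces("...")             # spm word pieces, e.g. ['▁...', ...]
words = _is_token(pieces)                     # re-attach '▁' to stopwords/long pieces
```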
7906fc4b0cb5090958dba5feba53118129fe2e91
| 322
|
py
|
Python
|
exercicios-turtle/.history/clown_20210623230605.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | 1
|
2021-11-28T18:49:21.000Z
|
2021-11-28T18:49:21.000Z
|
exercicios-turtle/.history/clown_20210623230605.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
exercicios-turtle/.history/clown_20210623230605.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
import turtle
tortuguita = turtle.Turtle()
tortuguita.speed(100)
tortuguita.dot(30, "black")
tortuguita.forward(15)
tortuguita.left(90)
tortuguita.circle(50)
tortuguita.circle(70)
tortuguita.circle(90)
tortuguita.right(90)
tortuguita.up()
tortuguita.forward(15)
tortuguita.down()
tortuguita.dot(30, "black")
turtle.done()
| 16.947368
| 27
| 0.785714
|
import turtle
tortuguita = turtle.Turtle()
tortuguita.speed(100)
tortuguita.dot(30, "black")
tortuguita.forward(15)
tortuguita.left(90)
tortuguita.circle(50)
tortuguita.circle(70)
tortuguita.circle(90)
tortuguita.right(90)
tortuguita.up()
tortuguita.forward(15)
tortuguita.down()
tortuguita.dot(30, "black")
turtle.done()
| true
| true
|
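The script draws two filled dots joined by a short bar, with three concentric circles around the joint. A sketch of the same drawing with the repeated `circle` calls folded into a loop; note that `turtle.speed` only honors values between 0.5 and 10, so the `speed(100)` above already means "fastest" (0):

```python
import turtle

t = turtle.Turtle()
t.speed(0)                     # fastest; out-of-range values like 100 map here too
t.dot(30, "black")
t.forward(15)
t.left(90)
for radius in (50, 70, 90):    # the three concentric circles
    t.circle(radius)
t.right(90)
t.up()
t.forward(15)
t.down()
t.dot(30, "black")
turtle.done()
```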
7906fc6645a886f9ed9950c4fedbead2d10eec99
| 7,164
|
py
|
Python
|
src/profit.py
|
dayuanyuan1989/SaveProfits
|
fcf86ab160eb7f9f064dfd25e9594dde2cc19ede
|
[
"MIT"
] | null | null | null |
src/profit.py
|
dayuanyuan1989/SaveProfits
|
fcf86ab160eb7f9f064dfd25e9594dde2cc19ede
|
[
"MIT"
] | null | null | null |
src/profit.py
|
dayuanyuan1989/SaveProfits
|
fcf86ab160eb7f9f064dfd25e9594dde2cc19ede
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time, threading, uuid, sys
import tushare as ts
from PyQt4 import QtCore, QtGui
import utils
class ProfitStrategy(QtCore.QObject):
def init(self, b):
pass
def update_target(self, dp, p, t1, t2):
pass
def reset_target(self, b, p, t1, t2):
pass
class ProfitWideStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
dp = t1
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t1 = dp
dp = t1 / 1.08
p = dp * 1.06
t2 = dp * 1.12
return (dp, p, t1, t2)
class ProfitThinStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
t1 = t2
dp = t1 / 1.08
p = dp * 1.06
t2 = p * 1.12
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t2 = t1
dp = t2 / 1.08
p = dp * 1.06
t1 = dp * 1.12
return (dp, p, t1, t2)
class SaveProfit(QtCore.QObject):
_saveProfitSignal = QtCore.pyqtSignal(int)
_resetSignal = QtCore.pyqtSignal(int)
_targetSignal = QtCore.pyqtSignal(int, int)
def __init__(self, id, base_cost, strategy=ProfitWideStrategy()):
super(SaveProfit, self).__init__()
self._strategy = strategy
self._id = id
self._trigger_count = 0
self._trigge_target = False
self._base_cost = base_cost
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.init(self._base_cost)
def run(self, price):
self._temp_price = price
if self._trigge_target:
if price >= self._target2:
self._trigge_target = False
self._trigger_count += 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
self._targetSignal.emit(self._id, self._trigger_count)
elif price < self._profit:
#warning
print self.info()
self._saveProfitSignal.emit(self._id)
return False
elif price >= self._profit:
if self._base_cost > self._profit and price >= self._base_cost:
self._resetSignal.emit(self._id)
self._trigge_target = False
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
else:
last_profit = self._dynamic_cost / 1.08 * 1.06
if price >= self._target1:
self._trigge_target = True
elif price <= self._dynamic_cost:
self._trigge_target = True
self._trigger_count -= 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.reset_target(self._dynamic_cost, self._profit, self._target1, self._target2)
return True
def info(self):
return {
"dyprice" : self._dynamic_cost,
"target1" : self._target1,
"target2" : self._target2,
"profit" : self._profit,
"base" : self._base_cost,
"cur" : self._temp_price,
"trigged" : self._trigge_target,
"trigger_count" : self._trigger_count
}
class StcokWatcher(QtCore.QObject):
def __init__(self, stock_infos):
super(StcokWatcher, self).__init__()
self._stock_infos = stock_infos #code,price,name, triggered
self._on_watch = False
self._t = threading.Thread(target=self.on_watch)
self._t.setDaemon(True)
def init(self):
self._profiters = []
self._stocks = []
for i in range(len(self._stock_infos)):
stock_info = self._stock_infos[i]
self._stocks.append(stock_info['code'])
base_price = stock_info['base']
if (stock_info.has_key('stragegy') and stock_info['stragegy'] == 1):
profiter = SaveProfit(i, base_price, ProfitThinStrategy())
else:
profiter = SaveProfit(i, base_price)
self._profiters.append(profiter)
self._profiters[i]._saveProfitSignal.connect(self.on_warn)
self._profiters[i]._resetSignal.connect(self.on_reset)
df = ts.get_realtime_quotes(self._stocks)
for i in df.index:
quote = df.loc[i]
self._stock_infos[i]['name'] = (quote['name'])
def on_watch(self):
while self._on_watch:
df = ts.get_realtime_quotes(self._stocks)
print '-' * 30
print "股票名 触发 当前价格 成本价格 收益点 收益率 触发次数"
for i in df.index:
quote = df.loc[i]
self._profiters[i].run(float(quote['price']))
#print self._profiters[i].info()
info = self._profiters[i].info()
prate = (info["cur"] - info["base"]) * 100 / info["cur"]
prate = int(prate)
triggerstr = '是' if info['trigged'] else '否'
print "%s %s %8.3f %8.3f %8.3f %8d%% %8d" % \
(self._stock_infos[i]['name'], triggerstr, info['cur'], info['base'], info['profit'], prate, info['trigger_count'])
#print info
time.sleep(3)
def on_warn(self, id):
#return
__business_id = uuid.uuid1()
profiter = self._profiters[id].info()
stock_info = self._stock_infos[id]
prate = (profiter["cur"] - profiter["base"]) * 100 / profiter["cur"]
prate = int(prate)
params = "{\"nm\":\"%s\",\"number\":\"%s\",\"in\":\"%.3f\",\"cur\":\"%.3f\",\"prate\":\"%d%%\"}" \
% (stock_info['name'], stock_info['code'], profiter["base"], profiter["cur"], prate)
if not stock_info.has_key('msg') or not stock_info['msg']:
print '+' * 40
print utils.send_sms(__business_id, "13564511106", "XK咨询", "SMS_94650115", params)
print '+' * 40
stock_info['msg'] = True
def on_reset(self, id):
self._stock_infos[id]['msg'] = False
def start(self):
self._on_watch = True
self._t.start()
if __name__ == "__main__":
stocks = [
{'code':'600516', 'base':34.313,'stragegy':1}, # 方大碳素
{'code':'002145', 'base':6.682}, # 中核钛白
{'code':'603079', 'base':69.819}, # 盛大科技
{'code':'002888', 'base':35.119}, # 惠威科技
{'code':'603826', 'base':20.609} # 坤彩科技
]
qApp = QtGui.QApplication(sys.argv)
watchers = StcokWatcher(stocks)
watchers.init()
watchers.start()
qApp.exec_()
| 36.927835
| 137
| 0.543691
|
import time, threading, uuid, sys
import tushare as ts
from PyQt4 import QtCore, QtGui
import utils
class ProfitStrategy(QtCore.QObject):
def init(self, b):
pass
def update_target(self, dp, p, t1, t2):
pass
def reset_target(self, b, p, t1, t2):
pass
class ProfitWideStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
dp = t1
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t1 = dp
dp = t1 / 1.08
p = dp * 1.06
t2 = dp * 1.12
return (dp, p, t1, t2)
class ProfitThinStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
t1 = t2
dp = t1 / 1.08
p = dp * 1.06
t2 = p * 1.12
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t2 = t1
dp = t2 / 1.08
p = dp * 1.06
t1 = dp * 1.12
return (dp, p, t1, t2)
class SaveProfit(QtCore.QObject):
_saveProfitSignal = QtCore.pyqtSignal(int)
_resetSignal = QtCore.pyqtSignal(int)
_targetSignal = QtCore.pyqtSignal(int, int)
def __init__(self, id, base_cost, strategy=ProfitWideStrategy()):
super(SaveProfit, self).__init__()
self._strategy = strategy
self._id = id
self._trigger_count = 0
self._trigge_target = False
self._base_cost = base_cost
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.init(self._base_cost)
def run(self, price):
self._temp_price = price
if self._trigge_target:
if price >= self._target2:
self._trigge_target = False
self._trigger_count += 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
self._targetSignal.emit(self._id, self._trigger_count)
elif price < self._profit:
print self.info()
self._saveProfitSignal.emit(self._id)
return False
elif price >= self._profit:
if self._base_cost > self._profit and price >= self._base_cost:
self._resetSignal.emit(self._id)
self._trigge_target = False
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
else:
last_profit = self._dynamic_cost / 1.08 * 1.06
if price >= self._target1:
self._trigge_target = True
elif price <= self._dynamic_cost:
self._trigge_target = True
self._trigger_count -= 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.reset_target(self._dynamic_cost, self._profit, self._target1, self._target2)
return True
def info(self):
return {
"dyprice" : self._dynamic_cost,
"target1" : self._target1,
"target2" : self._target2,
"profit" : self._profit,
"base" : self._base_cost,
"cur" : self._temp_price,
"trigged" : self._trigge_target,
"trigger_count" : self._trigger_count
}
class StcokWatcher(QtCore.QObject):
def __init__(self, stock_infos):
super(StcokWatcher, self).__init__()
self._stock_infos = stock_infos
self._on_watch = False
self._t = threading.Thread(target=self.on_watch)
self._t.setDaemon(True)
def init(self):
self._profiters = []
self._stocks = []
for i in range(len(self._stock_infos)):
stock_info = self._stock_infos[i]
self._stocks.append(stock_info['code'])
base_price = stock_info['base']
if (stock_info.has_key('stragegy') and stock_info['stragegy'] == 1):
profiter = SaveProfit(i, base_price, ProfitThinStrategy())
else:
profiter = SaveProfit(i, base_price)
self._profiters.append(profiter)
self._profiters[i]._saveProfitSignal.connect(self.on_warn)
self._profiters[i]._resetSignal.connect(self.on_reset)
df = ts.get_realtime_quotes(self._stocks)
for i in df.index:
quote = df.loc[i]
self._stock_infos[i]['name'] = (quote['name'])
def on_watch(self):
while self._on_watch:
df = ts.get_realtime_quotes(self._stocks)
print '-' * 30
print "股票名 触发 当前价格 成本价格 收益点 收益率 触发次数"
for i in df.index:
quote = df.loc[i]
self._profiters[i].run(float(quote['price']))
info = self._profiters[i].info()
prate = (info["cur"] - info["base"]) * 100 / info["cur"]
prate = int(prate)
triggerstr = '是' if info['trigged'] else '否'
print "%s %s %8.3f %8.3f %8.3f %8d%% %8d" % \
(self._stock_infos[i]['name'], triggerstr, info['cur'], info['base'], info['profit'], prate, info['trigger_count'])
time.sleep(3)
def on_warn(self, id):
__business_id = uuid.uuid1()
profiter = self._profiters[id].info()
stock_info = self._stock_infos[id]
prate = (profiter["cur"] - profiter["base"]) * 100 / profiter["cur"]
prate = int(prate)
params = "{\"nm\":\"%s\",\"number\":\"%s\",\"in\":\"%.3f\",\"cur\":\"%.3f\",\"prate\":\"%d%%\"}" \
% (stock_info['name'], stock_info['code'], profiter["base"], profiter["cur"], prate)
if not stock_info.has_key('msg') or not stock_info['msg']:
print '+' * 40
print utils.send_sms(__business_id, "13564511106", "XK咨询", "SMS_94650115", params)
print '+' * 40
stock_info['msg'] = True
def on_reset(self, id):
self._stock_infos[id]['msg'] = False
def start(self):
self._on_watch = True
self._t.start()
if __name__ == "__main__":
stocks = [
{'code':'600516', 'base':34.313,'stragegy':1},
{'code':'002145', 'base':6.682},
{'code':'603079', 'base':69.819},
{'code':'002888', 'base':35.119},
{'code':'603826', 'base':20.609}
]
qApp = QtGui.QApplication(sys.argv)
watchers = StcokWatcher(stocks)
watchers.init()
watchers.start()
qApp.exec_()
| false
| true
|
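The strategy classes in `profit.py` encode a trailing-profit ladder from a single floor price `dp`: the take-profit level sits at `dp * 1.06`, the first target at `dp * 1.08`, and the second target at `dp * 1.12`; once the price clears both targets, `update_target` raises the floor to the old first target and rebuilds the ladder. A worked example of `ProfitWideStrategy` with an assumed entry price of 10.0 (plain Python 3 arithmetic, unlike the Python 2 file above; no Qt or tushare needed):

```python
base = 10.0                                   # hypothetical entry price
dp = base                                     # init(): the floor starts at the entry
p, t1, t2 = dp * 1.06, dp * 1.08, dp * 1.12
print(p, t1, t2)                              # ~10.6, 10.8, 11.2

dp = t1                                       # update_target(): floor rises to t1
p, t1, t2 = dp * 1.06, dp * 1.08, dp * 1.12
print(p, t1, t2)                              # ~11.448, ~11.664, ~12.096
```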
7906fdb44ad72b320d627d874022f415dfccd5f9
| 724
|
py
|
Python
|
backend/function_park/dict_url.py
|
Mancid/mancid_project
|
4923264af324439658ad256444f3af6a4963e44f
|
[
"Unlicense"
] | 2
|
2021-05-12T14:10:16.000Z
|
2021-05-16T22:05:41.000Z
|
backend/function_park/dict_url.py
|
Mancid/mancid_project
|
4923264af324439658ad256444f3af6a4963e44f
|
[
"Unlicense"
] | 18
|
2021-05-11T14:24:05.000Z
|
2021-06-10T10:42:42.000Z
|
backend/function_park/dict_url.py
|
Mancid/mancid_project
|
4923264af324439658ad256444f3af6a4963e44f
|
[
"Unlicense"
] | 7
|
2021-05-01T17:50:54.000Z
|
2021-06-09T12:04:11.000Z
|
import configparser
import logging
def dict_url(conf):
"""Add all url from file url.ini with
key = name of the parking end value is
the url.
:returns: dictionnary with all parking and url
:rtype: dict
"""
url = configparser.ConfigParser()
logging.debug("initializing the variable url")
url.read(conf)
logging.debug("read the file")
logging.debug("all url in file %s", list(url["url"]))
res = {}
for simple_url in list(url["url"]):
parking = url["name"][simple_url]
link = url["url"][simple_url]
adress = url["adress"][simple_url]
res[parking] = link, adress
logging.info("this is the dict with keys and urls %s", res)
return res
| 27.846154
| 63
| 0.632597
|
import configparser
import logging
def dict_url(conf):
url = configparser.ConfigParser()
logging.debug("initializing the variable url")
url.read(conf)
logging.debug("read the file")
logging.debug("all url in file %s", list(url["url"]))
res = {}
for simple_url in list(url["url"]):
parking = url["name"][simple_url]
link = url["url"][simple_url]
adress = url["adress"][simple_url]
res[parking] = link, adress
logging.info("this is the dict with keys and urls %s", res)
return res
| true
| true
|
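`dict_url` expects `url.ini` to carry three parallel sections, `[name]`, `[url]`, and `[adress]` (spelled as in the code), all keyed by the same ids. A hedged end-to-end sketch with made-up values:

```python
import configparser  # dict_url from above is assumed in scope

ini = configparser.ConfigParser()
ini["name"] = {"p1": "Central Parking"}        # all values here are illustrative
ini["url"] = {"p1": "https://example.com/p1"}
ini["adress"] = {"p1": "1 Example Street"}     # matches the code's spelling
with open("url.ini", "w") as fh:
    ini.write(fh)

print(dict_url("url.ini"))
# {'Central Parking': ('https://example.com/p1', '1 Example Street')}
```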
7906fe1dda882dea88ca1a8fc5c233216e829712
| 1,364
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2020 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import context
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import become_loader
def test_ksu(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
play_context = PlayContext()
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
ksu_exe = 'ksu'
ksu_flags = ''
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert cmd == default_cmd
success = 'BECOME-SUCCESS-.+?'
play_context.become = True
play_context.become_user = 'foo'
play_context.set_become_plugin(become_loader.get('ksu'))
play_context.become_method = 'ksu'
play_context.become_flags = ksu_flags
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, play_context.become_user, ksu_flags,
default_exe, success, default_cmd), cmd) is not None)
| 34.1
| 115
| 0.699413
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import context
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import become_loader
def test_ksu(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
play_context = PlayContext()
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
ksu_exe = 'ksu'
ksu_flags = ''
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert cmd == default_cmd
success = 'BECOME-SUCCESS-.+?'
play_context.become = True
play_context.become_user = 'foo'
play_context.set_become_plugin(become_loader.get('ksu'))
play_context.become_method = 'ksu'
play_context.become_flags = ksu_flags
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, play_context.become_user, ksu_flags,
default_exe, success, default_cmd), cmd) is not None)
| true
| true
|
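The final assertion pins down the command shape the `ksu` become plugin is expected to build. A standalone sketch of that regex match; the command string is illustrative, and the double space reflects the empty `become_flags`:

```python
import re

success = 'BECOME-SUCCESS-.+?'
cmd = "ksu foo  -e /bin/bash -c 'echo BECOME-SUCCESS-abc123; /bin/foo'"  # made up
assert re.match("ksu foo  -e /bin/bash -c 'echo %s; /bin/foo'" % success, cmd)
```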
7906fe8c0c4250c2bd332733e7df7947bda7c175
| 833
|
py
|
Python
|
tests/unit/utils/test_objects.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | 1
|
2022-03-17T07:22:23.000Z
|
2022-03-17T07:22:23.000Z
|
tests/unit/utils/test_objects.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/test_objects.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from localstack.utils.objects import SubtypesInstanceManager
def test_subtypes_instance_manager():
class BaseClass(SubtypesInstanceManager):
def foo(self):
pass
class C1(BaseClass):
@staticmethod
def impl_name() -> str:
return "c1"
def foo(self):
return "bar"
instance1 = BaseClass.get("c1")
assert instance1
assert BaseClass.get("c1") == instance1
assert instance1.foo() == "bar"
with pytest.raises(Exception):
assert BaseClass.get("c2")
class C2(BaseClass):
@staticmethod
def impl_name() -> str:
return "c2"
def foo(self):
return "baz"
instance2 = BaseClass.get("c2")
assert BaseClass.get("c2") == instance2
assert instance2.foo() == "baz"
| 22.513514
| 60
| 0.596639
|
import pytest
from localstack.utils.objects import SubtypesInstanceManager
def test_subtypes_instance_manager():
class BaseClass(SubtypesInstanceManager):
def foo(self):
pass
class C1(BaseClass):
@staticmethod
def impl_name() -> str:
return "c1"
def foo(self):
return "bar"
instance1 = BaseClass.get("c1")
assert instance1
assert BaseClass.get("c1") == instance1
assert instance1.foo() == "bar"
with pytest.raises(Exception):
assert BaseClass.get("c2")
class C2(BaseClass):
@staticmethod
def impl_name() -> str:
return "c2"
def foo(self):
return "baz"
instance2 = BaseClass.get("c2")
assert BaseClass.get("c2") == instance2
assert instance2.foo() == "baz"
| true
| true
|
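The test depends on two behaviors of `SubtypesInstanceManager`: one instance is cached per `impl_name()` (the second `get("c1")` returns the same object), and a subclass defined after a failed lookup becomes visible to later lookups. A minimal sketch of such a registry; this only illustrates the pattern and is not localstack's actual implementation:

```python
class InstanceRegistry:
    """Caches one lazily created instance per impl_name()."""
    _instances = {}

    @staticmethod
    def impl_name() -> str:
        raise NotImplementedError

    @classmethod
    def get(cls, name: str):
        if name not in cls._instances:
            # __subclasses__() is evaluated per call, so subclasses defined
            # later (like C2 in the test) become resolvable afterwards.
            for sub in cls.__subclasses__():
                if sub.impl_name() == name:
                    cls._instances[name] = sub()
                    break
            else:
                raise Exception(f"no implementation registered for {name!r}")
        return cls._instances[name]
```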
7906fef75d37ae9679879dcf0a4445e99d5c6983
| 3,153
|
py
|
Python
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowSecurityGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'security_group_id': 'str'
}
attribute_map = {
'security_group_id': 'security_group_id'
}
def __init__(self, security_group_id=None):
"""ShowSecurityGroupRequest - a model defined in huaweicloud sdk"""
self._security_group_id = None
self.discriminator = None
self.security_group_id = security_group_id
@property
def security_group_id(self):
"""Gets the security_group_id of this ShowSecurityGroupRequest.
Security group resource ID
:return: The security_group_id of this ShowSecurityGroupRequest.
:rtype: str
"""
return self._security_group_id
@security_group_id.setter
def security_group_id(self, security_group_id):
"""Sets the security_group_id of this ShowSecurityGroupRequest.
Security group resource ID
:param security_group_id: The security_group_id of this ShowSecurityGroupRequest.
:type: str
"""
self._security_group_id = security_group_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowSecurityGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.657895
| 89
| 0.574691
|
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowSecurityGroupRequest:
sensitive_list = []
openapi_types = {
'security_group_id': 'str'
}
attribute_map = {
'security_group_id': 'security_group_id'
}
def __init__(self, security_group_id=None):
self._security_group_id = None
self.discriminator = None
self.security_group_id = security_group_id
@property
def security_group_id(self):
return self._security_group_id
@security_group_id.setter
def security_group_id(self, security_group_id):
self._security_group_id = security_group_id
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ShowSecurityGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
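A short usage sketch for the generated model above (the id is a made-up value; `six`, and `simplejson` for printing, are assumed installed as the file requires):

```python
req = ShowSecurityGroupRequest(security_group_id="0123-example")  # hypothetical id
print(req.to_dict())   # {'security_group_id': '0123-example'}
print(req)             # __repr__ -> to_str() -> a JSON string
```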
790700693f26a7ad5bc91e870bd6cef3ab1d8c42
| 1,856
|
py
|
Python
|
src/SALib/test_functions/Ishigami.py
|
zjzh/SALib
|
b6b6b5cab3388f3b80590c98d66aca7dc784d894
|
[
"MIT"
] | 573
|
2015-07-14T06:17:59.000Z
|
2022-03-31T03:42:00.000Z
|
src/SALib/test_functions/Ishigami.py
|
QianWanghhu/SALib
|
95a3371e503f9253cb917b8f0101c0202b969c2b
|
[
"MIT"
] | 339
|
2015-07-08T13:30:16.000Z
|
2022-03-25T07:48:09.000Z
|
src/SALib/test_functions/Ishigami.py
|
QianWanghhu/SALib
|
95a3371e503f9253cb917b8f0101c0202b969c2b
|
[
"MIT"
] | 191
|
2015-07-13T09:00:07.000Z
|
2022-03-29T22:49:26.000Z
|
import numpy as np
def evaluate(X: np.ndarray, A: float = 7.0, B: float = 0.1) -> np.ndarray:
"""Non-monotonic Ishigami-Homma three parameter test function:
`f(x) = \sin(x_{1}) + A \sin(x_{2})^2 + Bx^{4}_{3}\sin(x_{1})`
This test function is commonly used to benchmark global sensitivity
methods as variance-based sensitivities of this function can be
analytically determined.
See listed references below.
In [2], the expected first-order indices are:
x1: 0.3139
x2: 0.4424
x3: 0.0
when A = 7, B = 0.1 when conducting Sobol' analysis with the
Saltelli sampling method with a sample size of 1000.
Parameters
----------
X : np.ndarray
An `N*D` array holding values for each parameter, where `N` is the
number of samples and `D` is the number of parameters
(in this case, three).
A : float
Constant `A` parameter
B : float
Constant `B` parameter
Returns
-------
Y : np.ndarray
References
----------
.. [1] Ishigami, T., Homma, T., 1990.
An importance quantification technique in uncertainty analysis for
computer models.
Proceedings. First International Symposium on Uncertainty Modeling
and Analysis.
https://doi.org/10.1109/ISUMA.1990.151285
.. [2] Saltelli, A., Ratto, M., Andres, T., Campolongo, F., Cariboni, J.,
Gatelli, D., Saisana, M., Tarantola, S., 2008.
Global Sensitivity Analysis: The Primer. Wiley, West Sussex, U.K.
https://dx.doi.org/10.1002/9780470725184
"""
Y = np.zeros(X.shape[0])
Y = np.sin(X[:, 0]) + A * np.power(np.sin(X[:, 1]), 2) + \
B * np.power(X[:, 2], 4) * np.sin(X[:, 0])
return Y
| 31.457627
| 79
| 0.566272
|
import numpy as np
def evaluate(X: np.ndarray, A: float = 7.0, B: float = 0.1) -> np.ndarray:
Y = np.zeros(X.shape[0])
Y = np.sin(X[:, 0]) + A * np.power(np.sin(X[:, 1]), 2) + \
B * np.power(X[:, 2], 4) * np.sin(X[:, 0])
return Y
| true
| true
|
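The first-order indices quoted in the docstring follow from the standard closed-form variances of the Ishigami function (Saltelli et al. 2008): `V1 = (1 + B*pi**4/5)**2 / 2`, `V2 = A**2/8`, `V3 = 0`, and total `V = A**2/8 + B*pi**4/5 + B**2*pi**8/18 + 1/2`. A quick numeric check:

```python
import numpy as np

A, B = 7.0, 0.1
V1 = 0.5 * (1 + B * np.pi**4 / 5) ** 2
V2 = A**2 / 8
V = A**2 / 8 + B * np.pi**4 / 5 + B**2 * np.pi**8 / 18 + 0.5
print(V1 / V, V2 / V)   # ~0.3139, ~0.4424; S3 is exactly 0
```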
790700a6c7ada62d1c8c4f6a2b1bb02d7eb4ee5f
| 87,629
|
py
|
Python
|
tests/lax_control_flow_test.py
|
cdfreeman-google/jax
|
ca6f8186a36a8962845289ffc6baed3e96390f68
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/lax_control_flow_test.py
|
cdfreeman-google/jax
|
ca6f8186a36a8962845289ffc6baed3e96390f68
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/lax_control_flow_test.py
|
cdfreeman-google/jax
|
ca6f8186a36a8962845289ffc6baed3e96390f68
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from functools import partial
import itertools
import operator
import re
from unittest import SkipTest
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.random as npr
import jax
from jax._src import api
from jax import core
from jax import lax
from jax import random
from jax import test_util as jtu
from jax import tree_util
from jax._src.util import unzip2
from jax.lib import xla_bridge
from jax.interpreters import xla
import jax.numpy as jnp # scan tests use numpy
import jax.scipy as jsp
from jax.config import config
config.parse_flags_with_absl()
# Some tests are useful for testing both lax.cond and lax.switch. This function
# provides a lax.cond-compatible interface to a two-branch lax.switch. Several
# tests in this file are parameterized such that they either call into lax.cond
# or into this function.
def cond_via_switch(pred, true_fun, false_fun, op, *args):
if len(args) > 0:
assert len(args) == 1
true_op, _true_fun, false_op, _false_fun = true_fun, false_fun, op, args[0]
op = (false_op, true_op)
false_fun = lambda op: _false_fun(op[0])
true_fun = lambda op: _true_fun(op[1])
index = lax.convert_element_type(pred, np.int32)
return lax.switch(index, [false_fun, true_fun], op)
COND_IMPLS = [
(lax.cond, 'cond'),
(cond_via_switch, 'switch'),
]
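# Illustration (added; not part of the upstream test file): with no residual
# args, cond_via_switch lowers a two-way cond directly onto lax.switch, e.g.
#   cond_via_switch(True,  lambda x: x + 1, lambda x: x - 1, 7)  -> 8
#   cond_via_switch(False, lambda x: x + 1, lambda x: x - 1, 7)  -> 6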
SCAN_IMPLS = [
(lax.scan, 'unroll1'),
(partial(lax.scan, unroll=2), 'unroll2'),
]
def while_loop_reference(cond, body, carry):
while cond(carry):
carry = body(carry)
return carry
def scan_reference(f, init, xs):
carry = init
ys = []
for x in xs:
(carry, y) = f(carry, x)
ys.append(lax.reshape(y, (1,) + np.shape(y)))
ys = lax.concatenate(ys, 0)
return carry, ys
def high_precision_dot(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
def posify(matrix):
return high_precision_dot(matrix, matrix.T.conj())
class LaxControlFlowTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
jax._src.lax.control_flow._initial_style_open_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxprs_with_common_consts.cache_clear()
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num): # pylint: disable=missing-docstring
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = api.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return api.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
"""Test typing error messages for while."""
tuple_treedef = tree_util.tree_structure((1., 1.))
leaf_treedef = tree_util.tree_structure(0.)
with self.assertRaisesRegex(TypeError,
re.escape(f"cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(TypeError,
re.escape("body_fun output and input must have same type structure, "
f"got {tuple_treedef} and {leaf_treedef}.")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesWithLiteralMatch(TypeError,
("body_fun output and input must have identical types, got\n"
"ShapedArray(bool[], weak_type=True)\n"
"and\n"
"ShapedArray(float32[]).")):
lax.while_loop(lambda c: True, lambda c: True, np.float32(0.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr): # pylint: disable=missing-docstring
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = np.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = api.jit(outer_loop)
arr = npr.RandomState(0).randn(5, 5)
self.assertAllClose(outer_loop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num): # pylint: disable=missing-docstring
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, lax.add(i, 1), lax.add(total, arr_i))
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopAxisIndexBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < lax.axis_index('i'), lambda x: x + 2, x)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = np.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, jnp.array([2, 3]))
expected = np.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = api.vmap(fun)(np.array([0, 0]), np.array([1, 2]))
expected = (np.array([4, 3]), np.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_issue_3204(self):
# Error during XLA code generation for vmap of nested loops
def test(a, b):
val = 0
i = 0
j = 0
condfun_1 = lambda inp: inp[1] < a + 1
condfun_2 = lambda inp: inp[2] < b + 1
def bodyfun_1(inp):
val, i, j = inp
j = 0
def bodyfun_2(inp):
val, i, j = inp
val += i + j
j += 1
return (val, i, j)
result = lax.while_loop(condfun_2, bodyfun_2, (val, i, j))
val = result[0]
i += 1
return (val, i, j)
result = lax.while_loop(condfun_1, bodyfun_1, (val, i, j))
return result[0]
arr = np.arange(5)
vmap_test = api.vmap(test, (0, 0))
vmap_test(arr, arr)
def testForiLoopErrors(self):
"""Test typing error messages for while."""
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = api.vmap(fun)(np.array([0, 1]))
expected = (np.array([10, 11]), np.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
cond_fun = lambda carry: carry[0] < 4
body_fun = lambda carry: (carry[0] + 1, carry[1] + 1)
f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x))
jaxpr = api.make_jaxpr(api.vmap(f))(jnp.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = api.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, 0.)
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': 0.}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, 0., ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@api.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), lambda x: (x, x), false_fun, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testSwitch(self):
def branch(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun(x):
if x <= 0:
return branches[0](x)
elif x == 1:
return branches[1](x)
else:
return branches[2](x)
def cfun(x):
return lax.switch(x, branches, x)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
cfun = api.jit(cfun)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
def testSwitchResidualsMerge(self):
def get_conds(fun):
jaxpr = api.make_jaxpr(api.grad(fun))(0., 0)
return [eqn for eqn in jaxpr.jaxpr.eqns if eqn.primitive.name == 'cond']
def branch_invars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.invars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
def branch_outvars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.outvars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
branches1 = [
lambda x: jnp.sin(x),
lambda x: jnp.cos(x)] # branch residuals overlap, should be reused
branches2 = branches1 + [
lambda x: jnp.sinh(x)] # another overlapping residual, expect reuse
branches3 = branches2 + [
lambda x: jnp.sin(x) + jnp.cos(x)] # requires one more residual slot
def fun1(x, i):
return lax.switch(i + 1, branches1, x)
def fun2(x, i):
return lax.switch(i + 1, branches2, x)
def fun3(x, i):
return lax.switch(i + 1, branches3, x)
fwd1, bwd1 = get_conds(fun1)
fwd2, bwd2 = get_conds(fun2)
fwd3, bwd3 = get_conds(fun3)
fwd1_num_out = branch_outvars_len(fwd1)
fwd2_num_out = branch_outvars_len(fwd2)
fwd3_num_out = branch_outvars_len(fwd3)
assert fwd1_num_out == fwd2_num_out
assert fwd3_num_out == fwd2_num_out + 1
bwd1_num_in = branch_invars_len(bwd1)
bwd2_num_in = branch_invars_len(bwd2)
bwd3_num_in = branch_invars_len(bwd3)
assert bwd1_num_in == bwd2_num_in
assert bwd3_num_in == bwd2_num_in + 1
def testOneBranchSwitch(self):
branch = lambda x: -x
f = lambda i, x: lax.switch(i, [branch], x)
x = 7.
self.assertEqual(f(-1, x), branch(x))
self.assertEqual(f(0, x), branch(x))
self.assertEqual(f(1, x), branch(x))
cf = api.jit(f)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
cf = api.jit(f, static_argnums=0)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, lambda x: (True, x), lambda x: (False, x), pred)
@api.jit
def cfun(pred):
return fun(pred)
self.assertEqual(fun(0), cfun(0), (False,0))
self.assertEqual(fun(0.), cfun(0.), (False,0.))
self.assertEqual(fun(1), cfun(1), (True,1))
self.assertEqual(fun(1.), cfun(1.), (True,1.))
# test that proper errors are raised for wrong types
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testNestedCond(self, cond):
def fun(x):
if x < 2:
return lax.mul(2, x)
else:
if x < 5:
return lax.mul(3, x)
else:
return lax.mul(4, x)
@api.jit
def cfun(x):
return cond(
lax.lt(x, 2),
lambda x: lax.mul(2, x),
lambda x: cond(lax.lt(x, 5),
x, lambda x: lax.mul(3, x),
4, lambda y: lax.mul(y, x)),
x)
self.assertEqual(cfun(1), 2)
self.assertEqual(cfun(3), 9)
self.assertEqual(cfun(6), 24)
self.assertEqual(cfun(1), fun(1))
self.assertEqual(cfun(3), fun(3))
self.assertEqual(cfun(6), fun(6))
def testCondTypeErrors(self):
"""Test typing error messages for cond."""
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got <function")):
lax.cond(lambda x: True, lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got foo of type <class 'str'>")):
lax.cond("foo", lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")):
lax.cond((1., 1.), lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("true_fun and false_fun output must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.cond(True, lambda top: 2., lambda fop: (3., 3.), 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
true_fun and false_fun output must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.cond(True,
lambda top: jnp.array([1.], jnp.float32),
lambda fop: jnp.float32(1.),
1.)
def testSwitchErrors(self):
"""Test typing error messages for switch."""
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got <function")):
lax.switch(lambda x: True, [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got foo.")):
lax.switch("foo", [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Branch index must be scalar, got (1.0, 1.0) of shape (2,).")):
lax.switch((1., 1.), [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(ValueError,
re.escape("Empty branch sequence")):
lax.switch(0, [], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("branch 0 and 1 outputs must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.switch(1, [lambda _: 2., lambda _: (3., 3.)], 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
branch 0 and 1 outputs must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.switch(1, [lambda _: jnp.array([1.], jnp.float32),
lambda _: jnp.float32(1.)],
1.)
def testCondOneBranchConstant(self):
def fun(x):
if x < 3:
return 5.
else:
return x
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3), lambda x: 5, lambda x: x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), 5)
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), 4)
def testCondOneBranchConstantTuple(self):
def fun(x):
if x < 3:
return (1., 2., 3.)
else:
return (x, 2., 4.)
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3),
lambda x: (1, 2., 3.),
lambda x: (x, 2., 4.),
x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), (1, 2., 3.))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), (4, 2., 4.))
def testCondBatched(self):
def fun(x, y, z):
pred = lax.lt(x, 3)
true_fun = lambda y: y
false_fun = lambda z: lax.neg(z)
return lax.cond(pred, y, true_fun, z, false_fun)
# these cases stay as cond
x = jnp.array(2)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(4)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([2, 4])
ans = api.vmap(fun, (0, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
ans = api.vmap(fun)(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testSwitchBatched(self):
def fun(index, x, y, z):
branches = [lambda xyz: xyz[0],
lambda xyz: lax.neg(xyz[1]),
lambda xyz: lax.sign(xyz[2])]
return lax.switch(index, branches, (x, y, z))
# these cases stay as cond
x = jnp.array(0)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
w = jnp.array(9)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(1)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None, None)))(x, y, z, w)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
x = jnp.array([0, 1])
ans = api.vmap(fun, (0, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None, None)))(x, y, z, w)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
w = jnp.array([9, 9])
ans = api.vmap(fun)(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z, w)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testCondJVP(self):
def fun_ref(x):
if x < 3:
return (x, x)
else:
y = 2 * x
return y, 2 * y
def fun(x):
def false_fun(x):
y = 2 * x
return y, 2 * y
return lax.cond(x < 3, lambda x: (x, x), false_fun, x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testSwitchJVP(self):
def branch(x):
y = 2 * x
return y, 2 * y
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def fun(x):
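      # lax.switch requires an integer index, so bucket x with floor division and cast.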
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJVP2(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testCondGrad(self):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
x = 2.14
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testCondGradVmapNan(self):
eps = 1e-3
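    # jnp.sqrt has an infinite gradient at zero; the cond guard must keep NaNs
    # out of the vmapped gradient even though vmap can lower cond to select.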
def safe1(x):
return lax.cond(x < eps, lambda _: eps, lambda _: jnp.sqrt(x), ())
out = api.grad(lambda x: api.vmap(safe1)(x).sum())(np.zeros(10))
self.assertFalse(np.isnan(out).any())
def testSwitchGrad(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testSwitchGradWithWeakTypeMismatch(self): # issue #4696, PR #4896
dtype = jnp.ones(1).dtype
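    # Normalize to a concrete float type so dtype(1) below builds a strongly-typed scalar.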
dtype = jnp.float32 if dtype == jnp.float32 else jnp.float64
branches = [
lambda x: x, # This preserves the weak type of x.
lambda x: x + dtype(1), # This strips the weak type of x.
]
def f_ref(x):
i = x.astype(jnp.int32)
return branches[i](x)
def f(x):
return lax.switch(x.astype(jnp.int32), branches, x)
for x in [0., 1.]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.sin(x)
return z.sum()
def _f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.sin(x),
x)
f = lambda x: api.jit(_f)(x).sum()
x = 2.14 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"],
rtol={jnp.float32: 1e-2, jnp.float64: 2e-3})
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad3(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad4(self, cond):
def fun_ref(x, y):
if x < 3:
return 2. * jnp.sin(y)
else:
return 2. * jnp.cos(x)
def fun(x, y):
return cond(
x < 3,
(), lambda _: 2. * jnp.sin(y),
          x, lambda x: 2. * jnp.cos(x))
y = 5.8
x = 3.14
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
def testCondLinearize(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, 3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 4.)
self.assertAllClose(y, jnp.sin(4.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(4.) * 2., check_dtypes=False)
def testSwitchLinearize(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
# branch 0
y, f_lin = api.linearize(f, -1.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 0.)
self.assertAllClose(y, 0., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
# branch 1
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, jnp.sin(1.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(1.) * 2., check_dtypes=False)
# branch 2
y, f_lin = api.linearize(f, 2.)
self.assertAllClose(y, -2., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
y, f_lin = api.linearize(f, 3.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondLinearize2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.cos(jnp.sin(x))
return z.sum()
def f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.cos(jnp.sin(x)),
x).sum()
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
x = -2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
f = api.jit(f)
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
def testCondJit(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
def testSwitchJit(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-1., 0., 1., 2., 3.]:
y = api.jit(f)(x)
expected = f(x)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitDisabled(self, cond):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
with api.disable_jit():
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
with api.disable_jit():
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
def f_ref(x):
if x < 2:
return np.array([1., 2.]) * x
else:
return np.array([3., 4.]) * np.sin(x)
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = f(4.)
expected = f_ref(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondVmapGrad(self, cond):
# https://github.com/google/jax/issues/2264
def f_1(x): return x ** 2
def f_2(x): return x ** 3
def f(x): return cond(x > 0, f_1, f_2, x)
def g(x): return jnp.where(x > 0, f_1(x), f_2(x))
x = jnp.linspace(-1, 1, 20)
ans = api.vmap(api.grad(f))(x)
expected = api.vmap(api.grad(g))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue1263(self):
def f(rng, x):
cond = random.bernoulli(rng)
return lax.cond(cond, x, lambda x: x, jnp.abs(x) - 1., lambda x: x)
def body_fn(i, state):
rng, x = state
key, subkey = random.split(rng)
return key, f(subkey, x)
def g(rng, x):
return lax.fori_loop(0, 10, body_fn, (rng, x))
api.vmap(g)(random.split(random.PRNGKey(0), 3), jnp.ones((3, 4)))
def testIssue514(self):
# just check this doesn't crash
lax.cond(True,
(0, 0), lambda x: (x[0], 0),
(1, 1), lambda x: x)
def testIssue649(self):
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanImpl(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
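    # f closes over d, so scan must hoist d into the jaxpr's constants.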
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = scan(f, c, as_)
expected = scan_reference(f, c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanJVP(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
    ans = api.jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))
expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14, np.float32: 1e-5})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanLinearize(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)
expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanGrad(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.sum(jnp.sin(a)) + jnp.sum(jnp.sin(c)) + jnp.sum(jnp.sin(d))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
    ans = api.grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)
expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float32: 2e-5, np.float64: 1e-13})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
atol=1e-3, rtol=5e-3)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanRnn(self):
r = npr.RandomState(0)
n_in = 4
n_hid = 2
n_out = 1
length = 3
W_trans = r.randn(n_hid, n_hid + n_in).astype(jnp.float_)
W_out = r.randn(n_out, n_hid + n_in).astype(jnp.float_)
params = W_trans, W_out
inputs = r.randn(length, n_in).astype(jnp.float_)
targets = r.randn(length, n_out).astype(jnp.float_)
def step(params, state, input):
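      # One vanilla RNN cell: tanh-affine maps of [state, input] produce the
      # output and the next hidden state.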
W_trans, W_out = params
stacked = jnp.concatenate([state, input])
output = jnp.tanh(jnp.dot(W_out, stacked))
next_state = jnp.tanh(jnp.dot(W_trans, stacked))
return next_state, output
def rnn(params, inputs):
init_state = jnp.zeros(n_hid)
_, outputs = lax.scan(partial(step, params), init_state, inputs)
return outputs
def loss(params, inputs, targets):
predictions = rnn(params, inputs)
return jnp.sum((predictions - targets)**2)
# evaluation doesn't crash
loss(params, inputs, targets)
# jvp evaluation doesn't crash
api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
# jvp numerical check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
rtol={np.float32: 2e-2, np.float64: 1e-6})
# linearize works
_, expected = api.jvp(loss, (params, inputs, targets),
(params, inputs, targets))
_, linfun = api.linearize(loss, params, inputs, targets)
ans = linfun(params, inputs, targets)
self.assertAllClose(ans, expected, check_dtypes=False)
# gradient evaluation doesn't crash
api.grad(loss)(params, inputs, targets)
# gradient check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
# we can vmap to batch things
batch_size = 7
batched_inputs = r.randn(batch_size, length, n_in).astype(jnp.float_)
batched_targets = r.randn(batch_size, length, n_out).astype(jnp.float_)
batched_loss = api.vmap(lambda x, y: loss(params, x, y))
losses = batched_loss(batched_inputs, batched_targets)
expected = np.stack(list(map(lambda x, y: loss(params, x, y),
batched_inputs, batched_targets)))
self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
def testIssue711(self):
# Tests reverse-mode differentiation through a scan for which the scanned
# function also involves reverse-mode differentiation.
# See https://github.com/google/jax/issues/711
def harmonic_bond(conf, params):
return jnp.sum(conf * params)
def minimize_structure(test_params):
energy_fn = partial(harmonic_bond, params=test_params)
def apply_carry(carry, _):
i, x = carry
new_x = x - 0.1 * api.grad(energy_fn)(x)
new_carry = (i+1, new_x)
return new_carry, _
x0 = jnp.array([1., 2., 3.])
carry_final, _ = lax.scan(apply_carry, (0, x0), jnp.zeros((75, 0)))
_, x_final = carry_final
return x_final
initial_params = 0.5
minimize_structure(initial_params) # doesn't crash
def loss(test_params):
x_final = minimize_structure(test_params)
return jnp.sum(jnp.sin(1.0 - x_final))
api.grad(loss)(0.25) # doesn't crash
def testIssue744(self):
Point = collections.namedtuple('Point', ['x', 'y'])
p0 = Point(x=jnp.array(1), y=jnp.array(2))
def plus_one(p, iter_idx):
return Point(p.x+1, p.y+1), iter_idx
self.assertRaisesRegex(
ValueError,
'scan got value with no leading axis to scan over.*',
lambda: lax.scan(plus_one, p0, list(range(5))))
def testScanTypeErrors(self):
"""Test typing error messages for scan."""
a = jnp.arange(5)
# Body output not a tuple
with self.assertRaisesRegex(TypeError,
re.escape("scan body output must be a pair, got ShapedArray(float32[]).")):
lax.scan(lambda c, x: np.float32(0.), 0, a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure((0, 0, 0,))} "
f"and {tree_util.tree_structure((1, (2, 3)))}")):
lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.")):
lax.scan(lambda c, x: (0, x), None, a)
with self.assertRaisesWithLiteralMatch(
TypeError,
"scan carry output and input must have identical types, got\n"
"ShapedArray(int32[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.")):
lax.scan(lambda c, x: (0, x), (1, 2), a)
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def testScanHigherOrderDifferentiation(self, scan):
d = 0.75
def f(c, a):
b = jnp.sin(c * jnp.sum(jnp.cos(d * a)))
c = 0.9 * jnp.cos(d * jnp.sum(jnp.sin(c * a)))
return c, b
as_ = jnp.arange(6.).reshape((3, 2))
c = 1.
jtu.check_grads(lambda c, as_: scan(f, c, as_), (c, as_),
modes=["rev"], order=2, rtol={np.float32: 6e-3})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}_impl={}".format(
jit_scan, jit_f, in_axes, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes,
"scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS
for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
if in_axes != (None, None))
def testScanVmap(self, jit_scan, jit_f, in_axes, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_shape = [5, 3]
c_shape = [4]
c_bdim, as_bdim = in_axes
if c_bdim is not None:
c_shape.insert(c_bdim, 7)
if as_bdim is not None:
as_shape.insert(as_bdim, 7)
as_ = rng.randn(*as_shape)
c = rng.randn(*c_shape)
ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=1e-5, atol=1e-5)
def testScanVmapTuples(self):
def f(c, a):
a1, a2 = a
c1, c2 = c
b = jnp.sum(jnp.cos(a1)) * jnp.sum(jnp.tan(c2 * a2))
c = c1 * jnp.sin(jnp.sum(a1 * a2)), c2 * jnp.cos(jnp.sum(a1))
return c, b
in_axes = (0, (1, 2))
r = np.random.RandomState(0)
as_ = (r.randn(3, 7), r.randn(3, 4, 7))
c = (r.randn(7, 2), r.randn(7))
expected_c_out, expected_bs = [], []
for i in range(7):
c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
expected_c_out.append(c_out)
expected_bs.append(bs)
expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
expected_c_out = (jnp.stack(expected_c_out_0), jnp.stack(expected_c_out_1))
expected_bs = jnp.stack(expected_bs)
expected = expected_c_out, expected_bs
ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
def testScanVmapFixpoint(self):
def f(carry_init):
def scan_body(c, x):
# The carry is a 4-tuple, the last element starts batched,
# and the carry is shifted left at each iteration.
return ((c[1], c[2], c[3], 0.), None)
return lax.scan(scan_body, (0., 1., 2., carry_init), jnp.zeros(2))
carry_init = jnp.array([3., 4., 5.])
carry_out, _ = api.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], jnp.array([0., 0., 0.]), check_dtypes=False)
    self.assertAllClose(carry_out[2], jnp.array([0., 0., 0.]), check_dtypes=False)
    # After two shifts, position 1 recovers carry_init.
    self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
    self.assertAllClose(carry_out[0], jnp.array([2., 2., 2.]), check_dtypes=False)
def testIssue757(self):
# code from https://github.com/google/jax/issues/757
def fn(a):
return jnp.cos(a)
def loop(val):
iterations = 10
def apply_carry(x, i):
return api.grad(fn, argnums=(0,))(x)[0], i
final_val, _ = lax.scan(
apply_carry,
val,
jnp.arange(iterations)
)
return final_val
arg = 0.5
api.jit(api.jacfwd(loop, argnums=(0,)))(arg) # doesn't crash
def testIssue804(self):
num_devices = xla_bridge.device_count()
    f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i"), c), 0.)
api.pmap(f, axis_name="i")(jnp.ones((num_devices, 4))) # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = jnp.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected)
def testMapEmpty(self):
# https://github.com/google/jax/issues/2412
ans = lax.map(lambda x: x * x, jnp.array([]))
expected = jnp.array([])
self.assertAllClose(ans, expected)
def testCaching(self):
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
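    # The first call traces the Python functions; the second must hit the
    # compilation cache without re-executing them.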
python_should_be_executing = True
lax.while_loop(cond, body, 0)
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testCaching2(self):
# This second caching test shows a different kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare them for equality (including the literals on identity). We could
# implement that by adding a __hash__/__eq__ to core.Jaxpr and
# core.ClosedJaxpr (see #1221).
raise SkipTest("not implemented")
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash
self.assertEqual(out, ())
@parameterized.named_parameters(
{"testcase_name": "_jit_loop={}_jit_body={}_jit_cond={}".format(
jit_loop, jit_body, jit_cond),
"jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond}
for jit_loop in [False, True]
for jit_body in [False, True]
for jit_cond in [False, True])
def testWhileJVP(self, jit_loop=True, jit_body=False, jit_cond=True):
cond = lambda x: x[0, 2] <= 8
body = lambda x: x * x
if jit_cond:
cond = api.jit(cond)
if jit_body:
body = api.jit(body)
loop = partial(lax.while_loop, cond, body)
if jit_loop:
loop = api.jit(loop)
loop_ref = partial(while_loop_reference, cond, body)
x = jnp.arange(9.).reshape((3, 3))
ans = api.jvp(loop, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop, (x,), order=2, modes=["fwd"])
def testWhileJVPViaForiLoop(self):
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * 2, x)
self.assertAllClose(f(2.), 16., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (16., 8.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * (i + 1), x)
self.assertAllClose(f(2.), 12., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (12., 6.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
def testWhileJVPWithGrowingNonzeroTangents(self):
rng = np.random.RandomState(0)
def cond(state):
i, x, y, z = state
return i < 2
def body(state):
i, x, y, z = state
y = x * x
z = y * y
return i + 1, x, y, z
y, z = rng.randn(2), rng.randn(2)
def loop(loop_impl, x):
return loop_impl(cond, body, (0, x, y, z))[1]
loop_lax = partial(loop, lax.while_loop)
loop_ref = partial(loop, while_loop_reference)
x = rng.randn(2)
ans = api.jvp(loop_lax, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop_lax, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
dict(testcase_name="_loop={}".format(loop), loop=loop)
for loop in ["while", "fori", "fori_inside_cond", "fori_inside_scan"])
def testWhileGradError(self, loop: str = "fori_inside_scan"):
    # Reverse-mode differentiation (vjp) through these loops should raise an error.
if loop == "while":
func = lambda x: lax.while_loop(lambda i: i < 5., lambda i: i + 1., x)
elif loop == "fori":
func = lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x)
elif loop == "fori_inside_jit":
func = api.jit(lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x))
elif loop == "fori_inside_cond":
func = lambda x: lax.cond(True, x,
lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x),
1., lambda x: x)
elif loop == "fori_inside_scan":
func = lambda x: lax.scan(lambda c, x: (lax.fori_loop(x, x + 2., lambda i, c1: c1 * c, x),
None),
x, np.ones(2))[0]
else:
assert False
with self.assertRaisesRegex(ValueError, "Reverse-mode differentiation does not work for lax.while_loop"):
api.grad(func)(1.)
api.linearize(func, 1.) # Linearization works
def testIssue1316(self):
def f(carry, _):
c, key = carry
key, _ = random.split(key)
return (c, key), ()
key = random.PRNGKey(0)
api.grad(lambda c: lax.scan(f, (c, key), np.ones(3))[0][0])(0.) # doesn't crash
def testIssue1361(self):
@api.jit
def jit_run_scan(x):
def fun(carry, _):
x, _ = carry
return (2 * x, 0.), None
(x, _), _ = lax.scan(fun, (x, 0.), jnp.arange(3))
return x
api.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash
def test_custom_root_scalar(self):
def scalar_solve(f, y):
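      # f is linear here, so f(x) == f(1.0) * x and f(x) = y solves to y / f(1.0).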
return y / f(1.0)
def binary_search(func, x0, low=0.0, high=100.0):
del x0 # unused
def cond(state):
low, high = state
midpoint = 0.5 * (low + high)
return (low < midpoint) & (midpoint < high)
def body(state):
low, high = state
midpoint = 0.5 * (low + high)
update_upper = func(midpoint) > 0
low = jnp.where(update_upper, low, midpoint)
high = jnp.where(update_upper, midpoint, high)
return (low, high)
solution, _ = lax.while_loop(cond, body, (low, high))
return solution
def sqrt_cubed(x, tangent_solve=scalar_solve):
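      # custom_root differentiates through the root implicitly: gradients flow
      # via tangent_solve rather than through the binary search.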
f = lambda y: y ** 2 - x ** 3
return lax.custom_root(f, 0.0, binary_search, tangent_solve)
value, grad = api.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
self.assertAllClose(grad, api.grad(pow)(5.0, 1.5), check_dtypes=False,
rtol=1e-7)
jtu.check_grads(sqrt_cubed, (5.0,), order=2,
rtol={jnp.float32: 1e-2, jnp.float64: 1e-3})
inputs = jnp.array([4.0, 5.0])
results = api.vmap(sqrt_cubed)(inputs)
self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
results = api.jit(sqrt_cubed)(5.0)
self.assertAllClose(results, 5.0 ** 1.5, check_dtypes=False,
                        rtol={np.float64: 1e-7})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_root_vector_with_solve_closure(self):
def vector_solve(f, y):
return jnp.linalg.solve(api.jacobian(f)(y), y)
def linear_solve(a, b):
f = lambda y: high_precision_dot(a, y) - b
x0 = jnp.zeros_like(b)
solution = jnp.linalg.solve(a, b)
oracle = lambda func, x0: solution
return lax.custom_root(f, x0, oracle, vector_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(linear_solve, (a, b), order=2,
atol={np.float32: 1e-2, np.float64: 1e-11})
actual = api.jit(linear_solve)(a, b)
expected = jnp.linalg.solve(a, b)
self.assertAllClose(expected, actual)
def test_custom_root_with_custom_linear_solve(self):
def linear_solve(a, b):
f = lambda x: high_precision_dot(a, x) - b
factors = jsp.linalg.cho_factor(a)
cho_solve = lambda f, b: jsp.linalg.cho_solve(factors, b)
def pos_def_solve(g, b):
return lax.custom_linear_solve(g, b, cho_solve, symmetric=True)
return lax.custom_root(f, b, cho_solve, pos_def_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
actual = linear_solve(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
actual = api.jit(linear_solve)(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
jtu.check_grads(lambda x, y: linear_solve(high_precision_dot(x, x.T), y),
(a, b), order=2, rtol={jnp.float32: 1e-2})
def test_custom_root_errors(self):
with self.assertRaisesRegex(TypeError, re.escape("f() output pytree")):
lax.custom_root(lambda x: (x, x), 0.0, lambda f, x: x, lambda f, x: x)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_root(lambda x: x, 0.0, lambda f, x: (x, x), lambda f, x: x)
def dummy_root_usage(x):
f = lambda y: x - y
return lax.custom_root(f, 0.0, lambda f, x: x, lambda f, x: (x, x))
with self.assertRaisesRegex(
TypeError, re.escape("tangent_solve() output pytree")):
api.jvp(dummy_root_usage, (0.0,), (0.0,))
@parameterized.named_parameters(
{"testcase_name": "nonsymmetric", "symmetric": False},
{"testcase_name": "symmetric", "symmetric": True},
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve(self, symmetric):
def explicit_jacobian_solve(matvec, b):
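      # matvec is linear, so jacobian(matvec)(b) materializes its matrix;
      # stop_gradient keeps autodiff from tracing through the dense solve.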
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(
matvec, b, explicit_jacobian_solve, explicit_jacobian_solve,
symmetric=symmetric)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
if symmetric:
a = a + a.T
b = rng.randn(3)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
expected = jnp.linalg.solve(a, b)
actual = api.jit(linear_solve)(a, b)
self.assertAllClose(expected, actual)
c = rng.randn(3, 2)
expected = jnp.linalg.solve(a, c)
actual = api.vmap(linear_solve, (None, 1), 1)(a, c)
self.assertAllClose(expected, actual)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_zeros(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, explicit_jacobian_solve,
explicit_jacobian_solve)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
jtu.check_grads(lambda x: linear_solve(x, b), (a,), order=2,
rtol={np.float32: 5e-3})
jtu.check_grads(lambda x: linear_solve(a, x), (b,), order=2,
rtol={np.float32: 5e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_iterative(self):
def richardson_iteration(matvec, b, omega=0.1, tolerance=1e-6):
# Equivalent to vanilla gradient descent:
# https://en.wikipedia.org/wiki/Modified_Richardson_iteration
def cond(x):
return jnp.linalg.norm(matvec(x) - b) > tolerance
def body(x):
return x + omega * (b - matvec(x))
return lax.while_loop(cond, body, b)
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, richardson_iteration,
richardson_iteration)
def build_and_solve(a, b):
# intentionally non-linear in a and b
matvec = partial(high_precision_dot, jnp.exp(a))
return matrix_free_solve(matvec, jnp.cos(b))
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(jnp.exp(a), jnp.cos(b))
actual = build_and_solve(a, b)
self.assertAllClose(expected, actual, atol=1e-5)
jtu.check_grads(build_and_solve, (a, b), atol=1e-5, order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
# vmap across an empty dimension
jtu.check_grads(
api.vmap(build_and_solve), (a[None, :, :], b[None, :]),
atol=1e-5,
order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
def test_custom_linear_solve_cholesky(self):
def positive_definite_solve(a, b):
factors = jsp.linalg.cho_factor(a)
def solve(matvec, x):
return jsp.linalg.cho_solve(factors, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, symmetric=True)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(np.asarray(posify(a)), b)
actual = positive_definite_solve(posify(a), b)
self.assertAllClose(expected, actual)
actual = api.jit(positive_definite_solve)(posify(a), b)
self.assertAllClose(expected, actual)
# numerical gradients are only well defined if ``a`` is guaranteed to be
# positive definite.
jtu.check_grads(
lambda x, y: positive_definite_solve(posify(x), y),
(a, b), order=2, rtol=1e-2)
def test_custom_linear_solve_complex(self):
def solve(a, b):
def solve(matvec, x):
return jsp.linalg.solve(a, x)
def tr_solve(matvec, x):
return jsp.linalg.solve(a.T, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, tr_solve)
rng = np.random.RandomState(0)
a = 0.5 * rng.randn(2, 2) + 0.5j * rng.randn(2, 2)
b = 0.5 * rng.randn(2) + 0.5j * rng.randn(2)
jtu.check_grads(solve, (a, b), order=2, rtol=1e-2)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_lu(self):
def linear_solve(a, b):
a_factors = jsp.linalg.lu_factor(a)
at_factors = jsp.linalg.lu_factor(a.T)
def solve(matvec, x):
return jsp.linalg.lu_solve(a_factors, x)
def transpose_solve(vecmat, x):
return jsp.linalg.lu_solve(at_factors, x)
return lax.custom_linear_solve(
partial(high_precision_dot, a), b, solve, transpose_solve)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
expected = jnp.linalg.solve(a, b)
actual = linear_solve(a, b)
self.assertAllClose(expected, actual)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
# regression test for https://github.com/google/jax/issues/1536
jtu.check_grads(api.jit(linear_solve), (a, b), order=2,
rtol={np.float32: 2e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_without_transpose_solve(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def loss(a, b):
matvec = partial(high_precision_dot, a)
x = lax.custom_linear_solve(matvec, b, explicit_jacobian_solve)
return jnp.sum(x)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(loss, (a, b), order=2, modes=['fwd'],
atol={np.float32: 2e-3, np.float64: 1e-11})
jtu.check_grads(api.vmap(loss), (a[None,:,:], b[None,:]), order=2,
modes=['fwd'], atol={np.float32: 2e-3, np.float64: 1e-11})
with self.assertRaisesRegex(TypeError, "transpose_solve required"):
api.grad(loss)(a, b)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_pytree(self):
"""Test custom linear solve with inputs and outputs that are pytrees."""
def unrolled_matvec(mat, x):
"""Apply a Python list of lists of scalars to a list of scalars."""
result = []
for i in range(len(mat)):
v = 0
for j in range(len(x)):
if mat[i][j] is not None:
v += mat[i][j] * x[j]
result.append(v)
return result
def unrolled_substitution_solve(matvec, b, lower_tri):
"""Solve a triangular unrolled system with fwd/back substitution."""
zero = jnp.zeros(())
one = jnp.ones(())
x = [zero for _ in b]
ordering = range(len(b)) if lower_tri else range(len(b) - 1, -1, -1)
for i in ordering:
residual = b[i] - matvec(x)[i]
diagonal = matvec([one if i == j else zero for j in range(len(b))])[i]
x[i] = residual / diagonal
return x
def custom_unrolled_lower_tri_solve(mat, b):
return lax.custom_linear_solve(
partial(unrolled_matvec, mat), b,
partial(unrolled_substitution_solve, lower_tri=True),
partial(unrolled_substitution_solve, lower_tri=False))
mat = [[1.0, None, None, None, None, None, None],
[1.0, 1.0, None, None, None, None, None],
[None, 1.0, 1.0, None, None, None, None],
[None, None, 1.0, 1.0, None, None, None],
[None, None, None, 1.0, 1.0, None, None],
[None, None, None, None, None, 2.0, None],
[None, None, None, None, None, 4.0, 3.0]]
rng = np.random.RandomState(0)
b = list(rng.randn(7))
# Non-batched
jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2,
rtol={jnp.float32: 2e-2})
# Batch one element of b (which, because of unrolling, should only affect
# the first block of outputs)
b_bat = list(b)
b_bat[3] = rng.randn(3)
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(None, [None, None, None, 0, None, None, None]),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat),
order=2,
rtol={jnp.float32: 1e-2})
# Batch one element of mat (again only affecting first block)
mat[2][1] = rng.randn(3)
mat_axis_tree = [
[0 if i == 2 and j == 1 else None for j in range(7)] for i in range(7)
]
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(mat_axis_tree, None),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b),
order=2)
def test_custom_linear_solve_errors(self):
solve = lambda f, x: x
with self.assertRaisesRegex(TypeError, re.escape("matvec() output pytree")):
lax.custom_linear_solve(lambda x: [x], 1.0, solve, solve)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: [x], solve)
with self.assertRaisesRegex(
TypeError, re.escape("transpose_solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, solve, lambda f, x: [x])
with self.assertRaisesRegex(ValueError, re.escape("solve() output shapes")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: jnp.ones(2), solve)
def bad_matvec_usage(a):
return lax.custom_linear_solve(
lambda x: a * jnp.ones(2), 1.0, solve, solve)
with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")):
api.jvp(bad_matvec_usage, (1.0,), (1.0,))
def testIssue810(self):
def loss(A):
def step(x, i):
return jnp.matmul(A, x), None
init_x = jnp.zeros(A.shape[-1:])
last_x, _ = lax.scan(step, init_x, jnp.arange(10))
return jnp.sum(last_x)
A = jnp.zeros((3, 3))
# The second DUS was unnecessarily replicating A across time.
# We check XLA because _scan_impl is "underneath" the jaxpr language.
s = str(api.xla_computation(api.grad(loss))(A).as_hlo_text())
assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
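      # With xs=None, scan gets no per-iteration input and an explicit length is required.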
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = np.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap(self):
# code from jsnoek@
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x) # noqa: F821
ans = f_loop(jnp.ones(api.device_count()))
del body, f_loop
def body2(i, x):
result = jnp.broadcast_to(jnp.sin(x).sum(), x.shape)
return result + x
g_loop = lambda x: lax.fori_loop(0, 3, body2, x)
expected = g_loop(jnp.ones(api.device_count()))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap_error_message(self):
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x)
too_big = 2 * api.device_count()
self.assertRaisesRegex(
ValueError,
re.escape(
"compiling a primitive computation `while` that requires {} "
"replicas, but only {} XLA devices are available on backend {}."
.format(too_big, api.device_count(), jtu.device_under_test())),
lambda: f_loop(jnp.ones(too_big)))
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def test_scan_reverse(self, scan):
def cumsum(x, reverse):
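      # reverse=True scans from the last element toward the first.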
return scan(lambda c, x: (c + x, c + x), 0, x, reverse=reverse)[1]
x = np.array([3, 1, 4, 1, 5, 9])
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
def test_scan_unroll(self):
d = jnp.ones(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan = lambda c, xs: lax.scan(f, c, xs)
scan_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=2)
# jaxprs should be the same size
self.assertEqual(
len(str(api.make_jaxpr(scan)(c, xs))),
len(str(api.make_jaxpr(scan_unrolled)(c, xs))))
# but HLO should grow due to unrolling
self.assertLess(
len(str(api.xla_computation(scan)(c, xs).as_hlo_text())),
len(str(api.xla_computation(scan_unrolled)(c, xs).as_hlo_text())))
def test_disable_jit_cond_with_vmap(self):
# https://github.com/google/jax/issues/3093
def fn(t):
return lax.cond(t > 0, 0, lambda x: 0, 0, lambda x: 1)
fn = api.vmap(fn)
with api.disable_jit():
_ = fn(jnp.array([1])) # doesn't crash
def test_disable_jit_while_loop_with_vmap(self):
# https://github.com/google/jax/issues/2823
def trivial_while(y):
return lax.while_loop(lambda x: x < 10.0, lambda x: x + 1.0, y)
with api.disable_jit():
      api.vmap(trivial_while)(jnp.array([3.0, 4.0]))  # doesn't crash
def test_vmaps_of_while_loop(self):
# https://github.com/google/jax/issues/3164
def f(x, n): return lax.fori_loop(0, n, lambda _, x: x + 1, x)
x, n = jnp.arange(3), jnp.arange(4)
api.vmap(api.vmap(f, (None, 0)), (0, None))(x, n) # doesn't crash
@parameterized.named_parameters(
{"testcase_name": f"_{shape}_axis={axis}",
"shape": shape, "axis": axis}
for shape in [
[0], [1], [2], [3], [5], [10], [1000],
[2, 3], [7, 5], [5, 6, 7]
]
for axis in range(-len(shape), len(shape) - 1))
def testAssociativeScanUnstructured(self, shape, axis):
data = np.arange(np.prod(shape)).reshape(shape) + 7
expected = np.cumsum(data, axis=axis)
result = lax.associative_scan(operator.add, data, axis=axis)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanUnstructured1000Reverse(self):
data = np.arange(1000) + 32
expected = np.cumsum(data[::-1])[::-1]
result = lax.associative_scan(operator.add, data, reverse=True)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanStructured3(self):
pair = collections.namedtuple('pair', ('first', 'second'))
data = pair(first=np.array([0., 1., 2.]),
second=np.array([0., 10., 20.]))
def fn(a, b):
return pair(first=a.first + b.first,
second=a.second + b.second)
result = lax.associative_scan(fn, elems=data)
self.assertAllClose(result.first, np.array([0., 1., 3.]),
check_dtypes=False)
self.assertAllClose(result.second, np.array([0., 10., 30.]),
check_dtypes=False)
def test_scan_typecheck_param(self):
d = jnp.ones(2)
def f(c, a):
b = jnp.cos(jnp.sum(a) + jnp.sum(c) + jnp.sum(d))
c = jnp.sin(c * b)
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan_fun = lambda c, xs: lax.scan(f, c, xs)
def new_jaxpr():
jaxpr = api.make_jaxpr(scan_fun)(c, xs).jaxpr
scan = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'scan')
return jaxpr, scan
jaxpr, eqn = new_jaxpr()
eqn.params['reverse'] = 4
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param reverse of type int, bool required: 4'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['num_consts'] = -3
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param num_consts of type int, '
'non-negative int required: -3'),
lambda: core.check_jaxpr(jaxpr))
def test_cond_typecheck_param(self):
def new_jaxpr():
jaxpr = api.make_jaxpr(
lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
return jaxpr, cond
jaxpr, eqn = new_jaxpr()
eqn.params['branches'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param branches of type tuple, '
'tuple of ClosedJaxpr required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param linear of type tuple, '
'tuple of bool required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = 'multi\nline'
self.assertRaisesRegex(
core.JaxprTypeError,
r'invalid cond param linear of type str, '
r'tuple of bool required:\nmulti\nline',
lambda: core.check_jaxpr(jaxpr))
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_scan_init_weak_type(self, dtype):
def func(carry, x):
return carry + x, x
init_weak = 0 # Python scalars are weakly-typed.
x = jnp.ones(5, dtype=dtype)
carry, result = lax.scan(func, init_weak, x)
self.assertEqual(carry, x.sum())
self.assertArraysEqual(result, x)
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_while_loop_init_weak_type(self, dtype):
# This tests whether lax.while_loop can properly handle weakly-typed
# initial values.
def cond_fun(val):
return val < 2
def body_fun(val):
return val + increment
increment = jnp.array(1, dtype=dtype)
init_weak = 0 # Python scalars are weakly-typed.
result = lax.while_loop(cond_fun, body_fun, init_weak)
self.assertArraysEqual(result, jnp.full_like(increment, 2))
def test_scan_vjp_forwards_extensive_residuals(self):
# https://github.com/google/jax/issues/4510
def cumprod(x):
s = jnp.ones((2, 32), jnp.float32)
return lax.scan(lambda s, x: (x*s, s), s, x)
rng = np.random.RandomState(1234)
x = jnp.asarray(rng.randn(32, 2, 32).astype('float32'))
_, vjp_fun = api.vjp(cumprod, x)
# Need to spelunk into vjp_fun. This is fragile, and if it causes problems
# just skip this test.
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIs(ext_res, x)
x = rng.randn(32, 2, 32).astype('float32') # numpy.ndarray, not DeviceArray
_, vjp_fun = api.vjp(cumprod, x)
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIsInstance(ext_res, xla.DeviceArray)
def test_scan_vmap_collectives(self):
def scan_f(state, x):
s = lax.psum(state, 'i') * x
return state, s
def scan(state, xs):
return lax.scan(scan_f, state, xs)
scan_v = api.vmap(scan, in_axes=0, out_axes=0, axis_name='i')
self.assertAllClose(
scan_v(jnp.ones([1]), jnp.arange(5).reshape((1, 5))),
(jnp.array([1.]), jnp.array([[0., 1., 2., 3., 4.]])))
def test_xla_cpu_gpu_loop_cond_bug(self):
# https://github.com/google/jax/issues/5900
def deriv(f):
return lambda x, *args: jax.linearize(lambda x: f(x, *args), x)[1](1.0)
def _while_loop(cond_fun, body_fun, init_val, max_iter):
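      # Emulate while_loop with a fixed-length scan of cond-guarded steps;
      # unlike lax.while_loop, this stays reverse-mode differentiable.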
def _iter(val):
next_val = body_fun(val)
next_cond = True
return next_val, next_cond
def _fun(tup, _):
val, cond = tup
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), _
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def my_pow(x, y):
def body_fun(val):
return val * x
def cond_fun(val):
return True
return _while_loop(cond_fun, body_fun, 1.0, y)
self.assertAllClose(deriv(my_pow)(3.0, 1), 1.0, check_dtypes=False)
def test_unexpected_tracer_error(self):
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by while_loop"):
lst = []
def side_effecting_body(val):
lst.append(val)
return val+1
lax.while_loop(lambda x: x < 2, side_effecting_body, 1)
lst[0] += 1
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by scan"):
lst = []
def side_effecting_scan(carry, val):
lst.append(val)
return carry, val+1
lax.scan(side_effecting_scan, None, jnp.ones((2, 2)))
lst[0] += 1
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| 33.255787
| 110
| 0.604788
|
import collections
from functools import partial
import itertools
import operator
import re
from unittest import SkipTest
import textwrap
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.random as npr
import jax
from jax._src import api
from jax import core
from jax import lax
from jax import random
from jax import test_util as jtu
from jax import tree_util
from jax._src.util import unzip2
from jax.lib import xla_bridge
from jax.interpreters import xla
import jax.numpy as jnp
import jax.scipy as jsp
from jax.config import config
config.parse_flags_with_absl()
def cond_via_switch(pred, true_fun, false_fun, op, *args):
if len(args) > 0:
assert len(args) == 1
true_op, _true_fun, false_op, _false_fun = true_fun, false_fun, op, args[0]
op = (false_op, true_op)
false_fun = lambda op: _false_fun(op[0])
true_fun = lambda op: _true_fun(op[1])
index = lax.convert_element_type(pred, np.int32)
return lax.switch(index, [false_fun, true_fun], op)
COND_IMPLS = [
(lax.cond, 'cond'),
(cond_via_switch, 'switch'),
]
SCAN_IMPLS = [
(lax.scan, 'unroll1'),
(partial(lax.scan, unroll=2), 'unroll2'),
]
def while_loop_reference(cond, body, carry):
while cond(carry):
carry = body(carry)
return carry
def scan_reference(f, init, xs):
carry = init
ys = []
for x in xs:
(carry, y) = f(carry, x)
ys.append(lax.reshape(y, (1,) + np.shape(y)))
ys = lax.concatenate(ys, 0)
return carry, ys
def high_precision_dot(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
def posify(matrix):
return high_precision_dot(matrix, matrix.T.conj())
class LaxControlFlowTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
jax._src.lax.control_flow._initial_style_open_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxpr.cache_clear()
jax._src.lax.control_flow._initial_style_jaxprs_with_common_consts.cache_clear()
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num):
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count):
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = api.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return api.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
tuple_treedef = tree_util.tree_structure((1., 1.))
leaf_treedef = tree_util.tree_structure(0.)
with self.assertRaisesRegex(TypeError,
re.escape(f"cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(TypeError,
re.escape("body_fun output and input must have same type structure, "
f"got {tuple_treedef} and {leaf_treedef}.")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesWithLiteralMatch(TypeError,
("body_fun output and input must have identical types, got\n"
"ShapedArray(bool[], weak_type=True)\n"
"and\n"
"ShapedArray(float32[]).")):
lax.while_loop(lambda c: True, lambda c: True, np.float32(0.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr):
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = np.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out):
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = api.jit(outer_loop)
arr = npr.RandomState(0).randn(5, 5)
self.assertAllClose(outer_loop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), np.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num):
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, lax.add(i, 1), lax.add(total, arr_i))
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
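# Under vmap, while_loop iterates until every batch element's cond is False;
# elements that finish early carry their values through unchanged, e.g. for
# cond x < 3 with body x + 2, inputs [0, 1, 2, 3] map to [4, 3, 4, 3].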
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun)(np.array([0, 1, 2, 3]))
expected = np.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopAxisIndexBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < lax.axis_index('i'), lambda x: x + 2, x)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun, axis_name='i')(np.array([0, 0, 0, 0]))
expected = np.array([0, 2, 2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = np.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, jnp.array([2, 3]))
expected = np.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = api.vmap(fun)(np.array([0, 0]), np.array([1, 2]))
expected = (np.array([4, 3]), np.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_issue_3204(self):
def test(a, b):
val = 0
i = 0
j = 0
condfun_1 = lambda inp: inp[1] < a + 1
condfun_2 = lambda inp: inp[2] < b + 1
def bodyfun_1(inp):
val, i, j = inp
j = 0
def bodyfun_2(inp):
val, i, j = inp
val += i + j
j += 1
return (val, i, j)
result = lax.while_loop(condfun_2, bodyfun_2, (val, i, j))
val = result[0]
i += 1
return (val, i, j)
result = lax.while_loop(condfun_1, bodyfun_1, (val, i, j))
return result[0]
arr = np.arange(5)
vmap_test = api.vmap(test, (0, 0))
vmap_test(arr, arr)
def testForiLoopErrors(self):
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = api.vmap(fun)(np.array([0, 1]))
expected = (np.array([10, 11]), np.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
cond_fun = lambda carry: carry[0] < 4
body_fun = lambda carry: (carry[0] + 1, carry[1] + 1)
f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x))
jaxpr = api.make_jaxpr(api.vmap(f))(jnp.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = api.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, 0.)
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': 0.}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, 0., ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(jnp.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), np.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@api.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), lambda x: (x, x), false_fun, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testSwitch(self):
def branch(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun(x):
if x <= 0:
return branches[0](x)
elif x == 1:
return branches[1](x)
else:
return branches[2](x)
def cfun(x):
return lax.switch(x, branches, x)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
cfun = api.jit(cfun)
self.assertEqual(fun(-1), cfun(-1))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(3), cfun(3))
def testSwitchResidualsMerge(self):
def get_conds(fun):
jaxpr = api.make_jaxpr(api.grad(fun))(0., 0)
return [eqn for eqn in jaxpr.jaxpr.eqns if eqn.primitive.name == 'cond']
def branch_invars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.invars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
def branch_outvars_len(cond_eqn):
lens = [len(jaxpr.jaxpr.outvars) for jaxpr in cond_eqn.params['branches']]
assert len(set(lens)) == 1
return lens[0]
branches1 = [
lambda x: jnp.sin(x),
lambda x: jnp.cos(x)]
branches2 = branches1 + [
lambda x: jnp.sinh(x)]
branches3 = branches2 + [
lambda x: jnp.sin(x) + jnp.cos(x)]
def fun1(x, i):
return lax.switch(i + 1, branches1, x)
def fun2(x, i):
return lax.switch(i + 1, branches2, x)
def fun3(x, i):
return lax.switch(i + 1, branches3, x)
fwd1, bwd1 = get_conds(fun1)
fwd2, bwd2 = get_conds(fun2)
fwd3, bwd3 = get_conds(fun3)
fwd1_num_out = branch_outvars_len(fwd1)
fwd2_num_out = branch_outvars_len(fwd2)
fwd3_num_out = branch_outvars_len(fwd3)
assert fwd1_num_out == fwd2_num_out
assert fwd3_num_out == fwd2_num_out + 1
bwd1_num_in = branch_invars_len(bwd1)
bwd2_num_in = branch_invars_len(bwd2)
bwd3_num_in = branch_invars_len(bwd3)
assert bwd1_num_in == bwd2_num_in
assert bwd3_num_in == bwd2_num_in + 1
def testOneBranchSwitch(self):
branch = lambda x: -x
f = lambda i, x: lax.switch(i, [branch], x)
x = 7.
self.assertEqual(f(-1, x), branch(x))
self.assertEqual(f(0, x), branch(x))
self.assertEqual(f(1, x), branch(x))
cf = api.jit(f)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
cf = api.jit(f, static_argnums=0)
self.assertEqual(cf(-1, x), branch(x))
self.assertEqual(cf(0, x), branch(x))
self.assertEqual(cf(1, x), branch(x))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, lambda x: (True, x), lambda x: (False, x), pred)
@api.jit
def cfun(pred):
return fun(pred)
self.assertEqual(fun(0), cfun(0), (False,0))
self.assertEqual(fun(0.), cfun(0.), (False,0.))
self.assertEqual(fun(1), cfun(1), (True,1))
self.assertEqual(fun(1.), cfun(1.), (True,1.))
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testNestedCond(self, cond):
def fun(x):
if x < 2:
return lax.mul(2, x)
else:
if x < 5:
return lax.mul(3, x)
else:
return lax.mul(4, x)
@api.jit
def cfun(x):
return cond(
lax.lt(x, 2),
lambda x: lax.mul(2, x),
lambda x: cond(lax.lt(x, 5),
x, lambda x: lax.mul(3, x),
4, lambda y: lax.mul(y, x)),
x)
self.assertEqual(cfun(1), 2)
self.assertEqual(cfun(3), 9)
self.assertEqual(cfun(6), 24)
self.assertEqual(cfun(1), fun(1))
self.assertEqual(cfun(3), fun(3))
self.assertEqual(cfun(6), fun(6))
def testCondTypeErrors(self):
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got <function")):
lax.cond(lambda x: True, lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got foo of type <class 'str'>")):
lax.cond("foo", lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")):
lax.cond((1., 1.), lambda top: 2., lambda fop: 3., 1.)
with self.assertRaisesRegex(TypeError,
re.escape("true_fun and false_fun output must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.cond(True, lambda top: 2., lambda fop: (3., 3.), 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
true_fun and false_fun output must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.cond(True,
lambda top: jnp.array([1.], jnp.float32),
lambda fop: jnp.float32(1.),
1.)
def testSwitchErrors(self):
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got <function")):
lax.switch(lambda x: True, [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Index type must be an integer, got foo.")):
lax.switch("foo", [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("Branch index must be scalar, got (1.0, 1.0) of shape (2,).")):
lax.switch((1., 1.), [lambda _: 2., lambda _: 3.], 1.)
with self.assertRaisesRegex(ValueError,
re.escape("Empty branch sequence")):
lax.switch(0, [], 1.)
with self.assertRaisesRegex(TypeError,
re.escape("branch 0 and 1 outputs must have same type structure, "
f"got {tree_util.tree_structure(2.)} and {tree_util.tree_structure((3., 3.))}.")):
lax.switch(1, [lambda _: 2., lambda _: (3., 3.)], 1.)
with self.assertRaisesRegex(
TypeError, textwrap.dedent(
r"""
branch 0 and 1 outputs must have identical types, got
ShapedArray\(float32\[1\]\)
and
ShapedArray\(float32\[\].*\).""").strip()):
lax.switch(1, [lambda _: jnp.array([1.], jnp.float32),
lambda _: jnp.float32(1.)],
1.)
def testCondOneBranchConstant(self):
def fun(x):
if x < 3:
return 5.
else:
return x
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3), lambda x: 5, lambda x: x, x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), 5)
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), 4)
def testCondOneBranchConstantTuple(self):
def fun(x):
if x < 3:
return (1., 2., 3.)
else:
return (x, 2., 4.)
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3),
lambda x: (1, 2., 3.),
lambda x: (x, 2., 4.),
x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), (1, 2., 3.))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), (4, 2., 4.))
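# With an unbatched predicate, vmap keeps cond/switch as a conditional; once
# the predicate itself is batched, every branch must execute and the results
# are combined with `select`, which the jaxpr string checks below assert.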
def testCondBatched(self):
def fun(x, y, z):
pred = lax.lt(x, 3)
true_fun = lambda y: y
false_fun = lambda z: lax.neg(z)
return lax.cond(pred, y, true_fun, z, false_fun)
x = jnp.array(2)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(4)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array([2, 4])
ans = api.vmap(fun, (0, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
ans = api.vmap(fun)(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testSwitchBatched(self):
def fun(index, x, y, z):
branches = [lambda xyz: xyz[0],
lambda xyz: lax.neg(xyz[1]),
lambda xyz: lax.sign(xyz[2])]
return lax.switch(index, branches, (x, y, z))
x = jnp.array(0)
y = jnp.array([1, 2])
z = jnp.array([3, 4])
w = jnp.array(9)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array(1)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0, None)))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0, None))(x, y, z, w)
expected = np.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = jnp.array(5)
ans = api.vmap(fun, (None, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None, None)))(x, y, z, w)
expected = np.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = jnp.array([0, 1])
ans = api.vmap(fun, (0, 0, None, None))(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None, None)))(x, y, z, w)
expected = np.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = jnp.array([3, 4])
w = jnp.array([9, 9])
ans = api.vmap(fun)(x, y, z, w)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z, w)
expected = np.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
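# JVPs of cond/switch differentiate only the branch actually taken, so the
# tangents must agree with the plain-Python references branch by branch.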
def testCondJVP(self):
def fun_ref(x):
if x < 3:
return (x, x)
else:
y = 2 * x
return y, 2 * y
def fun(x):
def false_fun(x):
y = 2 * x
return y, 2 * y
return lax.cond(x < 3, lambda x: (x, x), false_fun, x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testSwitchJVP(self):
def branch(x):
y = 2 * x
return y, 2 * y
branches = [lambda x: (x, x),
branch,
lambda x: (x, -x)]
def fun_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def fun(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJVP2(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
x = 2.72
ans = api.jvp(fun, (x,), (x,))
expected = api.jvp(fun_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd"])
def testCondGrad(self):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
x = 2.14
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testCondGradVmapNan(self):
eps = 1e-3
def safe1(x):
return lax.cond(x < eps, lambda _: eps, lambda _: jnp.sqrt(x), ())
out = api.grad(lambda x: api.vmap(safe1)(x).sum())(np.zeros(10))
self.assertFalse(np.isnan(out).any())
def testSwitchGrad(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f_ref(x):
idx = x // 1
if idx <= 0:
return branches[0](x)
elif idx == 1:
return branches[1](x)
else:
return branches[2](x)
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-0.7, 0.7, 1.7, 2.7, 3.7]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
def testSwitchGradWithWeakTypeMismatch(self):
dtype = jnp.ones(1).dtype
dtype = jnp.float32 if dtype == jnp.float32 else jnp.float64
branches = [
lambda x: x,
lambda x: x + dtype(1),
]
def f_ref(x):
i = x.astype(jnp.int32)
return branches[i](x)
def f(x):
return lax.switch(x.astype(jnp.int32), branches, x)
for x in [0., 1.]:
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.sin(x)
return z.sum()
def _f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.sin(x),
x)
f = lambda x: api.jit(_f)(x).sum()
x = 2.14 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"])
x = 1.72 * jnp.ones(2)
ans = api.grad(f)(x)
expected = api.grad(f_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(f, (x,), order=2, modes=["fwd", "rev"],
rtol={jnp.float32: 1e-2, jnp.float64: 2e-3})
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad3(self, cond):
def fun_ref(x):
if x < 3:
return 2.
else:
return 2. * x
def fun(x):
return cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)
x = 3.14
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun)(x)
expected = api.grad(fun_ref)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x,), order=2, modes=["fwd", "rev"])
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondGrad4(self, cond):
def fun_ref(x, y):
if x < 3:
return 2. * jnp.sin(y)
else:
return 2. * jnp.cos(x)
def fun(x, y):
return cond(
x < 3,
(), lambda _: 2. * jnp.sin(y),
x, lambda x: 2. * jnp.cos(x))
y = 5.8
x = 3.14
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
x = 2.72
ans = api.grad(fun, 1)(x, y)
expected = api.grad(fun_ref, 1)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(fun, (x, y), order=2, modes=["fwd", "rev"])
def testCondLinearize(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, 3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 4.)
self.assertAllClose(y, jnp.sin(4.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(4.) * 2., check_dtypes=False)
def testSwitchLinearize(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
y, f_lin = api.linearize(f, -1.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 0.)
self.assertAllClose(y, 0., check_dtypes=False)
self.assertAllClose(f_lin(2.), 6., check_dtypes=False)
y, f_lin = api.linearize(f, 1.)
self.assertAllClose(y, jnp.sin(1.), check_dtypes=False)
self.assertAllClose(f_lin(2.), jnp.cos(1.) * 2., check_dtypes=False)
y, f_lin = api.linearize(f, 2.)
self.assertAllClose(y, -2., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
y, f_lin = api.linearize(f, 3.)
self.assertAllClose(y, -3., check_dtypes=False)
self.assertAllClose(f_lin(2.), -2., check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondLinearize2(self, cond):
def f_ref(x):
z = jnp.array([1., 2.]) * x if x[0] < 2 else jnp.cos(jnp.sin(x))
return z.sum()
def f(x):
return cond(
x[0] < 2,
lambda x: jnp.array([1., 2.]) * x,
lambda x: jnp.cos(jnp.sin(x)),
x).sum()
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
x = -2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
f = api.jit(f)
x = 2.14 * jnp.ones(2)
y, f_lin = api.linearize(f, x)
y_ref, f_lin_ref = api.linearize(f_ref, x)
self.assertAllClose(y, y_ref, check_dtypes=False)
self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)
def testCondJit(self):
def f(x):
return lax.cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
def testSwitchJit(self):
branches = [lambda x: 3. * x,
lambda x: jnp.sin(x),
lambda x: -x]
def f(x):
idx = lax.convert_element_type(x // 1, np.int32)
return lax.switch(idx, branches, x)
for x in [-1., 0., 1., 2., 3.]:
y = api.jit(f)(x)
expected = f(x)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitDisabled(self, cond):
def f_ref(x):
return 3. * x if x < 2 else jnp.sin(x)
def f(x):
return cond(x < 2, lambda x: 3. * x, lambda x: jnp.sin(x), x)
with api.disable_jit():
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
with api.disable_jit():
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
def f_ref(x):
if x < 2:
return np.array([1., 2.]) * x
else:
return np.array([3., 4.]) * np.sin(x)
y = f(1.)
expected = f_ref(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = f(4.)
expected = f_ref(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondJitWithConsts(self, cond):
def f(x):
return cond(x < 2,
lambda x: np.array([1., 2.]) * x,
lambda x: np.array([3., 4.]) * jnp.sin(x),
x)
y = api.jit(f)(1.)
expected = f(1.)
self.assertAllClose(y, expected, check_dtypes=False)
y = api.jit(f)(4.)
expected = f(4.)
self.assertAllClose(y, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": f"_{name}", "cond": cond}
for cond, name in COND_IMPLS)
def testCondVmapGrad(self, cond):
def f_1(x): return x ** 2
def f_2(x): return x ** 3
def f(x): return cond(x > 0, f_1, f_2, x)
def g(x): return jnp.where(x > 0, f_1(x), f_2(x))
x = jnp.linspace(-1, 1, 20)
ans = api.vmap(api.grad(f))(x)
expected = api.vmap(api.grad(g))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue1263(self):
def f(rng, x):
cond = random.bernoulli(rng)
return lax.cond(cond, x, lambda x: x, jnp.abs(x) - 1., lambda x: x)
def body_fn(i, state):
rng, x = state
key, subkey = random.split(rng)
return key, f(subkey, x)
def g(rng, x):
return lax.fori_loop(0, 10, body_fn, (rng, x))
api.vmap(g)(random.split(random.PRNGKey(0), 3), jnp.ones((3, 4)))
def testIssue514(self):
lax.cond(True,
(0, 0), lambda x: (x[0], 0),
(1, 1), lambda x: x)
def testIssue649(self):
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanImpl(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = scan(f, c, as_)
expected = scan_reference(f, c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanJVP(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))
expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14, np.float32: 1e-5})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
def testScanLinearize(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)
expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float64: 1e-14})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_impl={}".format(
jit_scan, jit_f, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanGrad(self, jit_scan, jit_f, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.sum(jnp.sin(a)) + jnp.sum(jnp.sin(c)) + jnp.sum(jnp.sin(d))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)
expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={np.float32: 2e-5, np.float64: 1e-13})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
atol=1e-3, rtol=5e-3)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testScanRnn(self):
r = npr.RandomState(0)
n_in = 4
n_hid = 2
n_out = 1
length = 3
W_trans = r.randn(n_hid, n_hid + n_in).astype(jnp.float_)
W_out = r.randn(n_out, n_hid + n_in).astype(jnp.float_)
params = W_trans, W_out
inputs = r.randn(length, n_in).astype(jnp.float_)
targets = r.randn(length, n_out).astype(jnp.float_)
def step(params, state, input):
W_trans, W_out = params
stacked = jnp.concatenate([state, input])
output = jnp.tanh(jnp.dot(W_out, stacked))
next_state = jnp.tanh(jnp.dot(W_trans, stacked))
return next_state, output
def rnn(params, inputs):
init_state = jnp.zeros(n_hid)
_, outputs = lax.scan(partial(step, params), init_state, inputs)
return outputs
def loss(params, inputs, targets):
predictions = rnn(params, inputs)
return jnp.sum((predictions - targets)**2)
# evaluation doesn't crash
loss(params, inputs, targets)
api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
# jvp numerical check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
rtol={np.float32: 2e-2, np.float64: 1e-6})
# linearize works
_, expected = api.jvp(loss, (params, inputs, targets),
(params, inputs, targets))
_, linfun = api.linearize(loss, params, inputs, targets)
ans = linfun(params, inputs, targets)
self.assertAllClose(ans, expected, check_dtypes=False)
# gradient evaluation doesn't crash
api.grad(loss)(params, inputs, targets)
jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
batch_size = 7
batched_inputs = r.randn(batch_size, length, n_in).astype(jnp.float_)
batched_targets = r.randn(batch_size, length, n_out).astype(jnp.float_)
batched_loss = api.vmap(lambda x, y: loss(params, x, y))
losses = batched_loss(batched_inputs, batched_targets)
expected = np.stack(list(map(lambda x, y: loss(params, x, y),
batched_inputs, batched_targets)))
self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
def testIssue711(self):
def harmonic_bond(conf, params):
return jnp.sum(conf * params)
def minimize_structure(test_params):
energy_fn = partial(harmonic_bond, params=test_params)
def apply_carry(carry, _):
i, x = carry
new_x = x - 0.1 * api.grad(energy_fn)(x)
new_carry = (i+1, new_x)
return new_carry, _
x0 = jnp.array([1., 2., 3.])
carry_final, _ = lax.scan(apply_carry, (0, x0), jnp.zeros((75, 0)))
_, x_final = carry_final
return x_final
initial_params = 0.5
minimize_structure(initial_params)
def loss(test_params):
x_final = minimize_structure(test_params)
return jnp.sum(jnp.sin(1.0 - x_final))
api.grad(loss)(0.25) # doesn't crash
def testIssue744(self):
Point = collections.namedtuple('Point', ['x', 'y'])
p0 = Point(x=jnp.array(1), y=jnp.array(2))
def plus_one(p, iter_idx):
return Point(p.x+1, p.y+1), iter_idx
self.assertRaisesRegex(
ValueError,
'scan got value with no leading axis to scan over.*',
lambda: lax.scan(plus_one, p0, list(range(5))))
def testScanTypeErrors(self):
a = jnp.arange(5)
with self.assertRaisesRegex(TypeError,
re.escape("scan body output must be a pair, got ShapedArray(float32[]).")):
lax.scan(lambda c, x: np.float32(0.), 0, a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure((0, 0, 0,))} "
f"and {tree_util.tree_structure((1, (2, 3)))}")):
lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.")):
lax.scan(lambda c, x: (0, x), None, a)
with self.assertRaisesWithLiteralMatch(
TypeError,
"scan carry output and input must have identical types, got\n"
"ShapedArray(int32[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
f"got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.")):
lax.scan(lambda c, x: (0, x), (1, 2), a)
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def testScanHigherOrderDifferentiation(self, scan):
d = 0.75
def f(c, a):
b = jnp.sin(c * jnp.sum(jnp.cos(d * a)))
c = 0.9 * jnp.cos(d * jnp.sum(jnp.sin(c * a)))
return c, b
as_ = jnp.arange(6.).reshape((3, 2))
c = 1.
jtu.check_grads(lambda c, as_: scan(f, c, as_), (c, as_),
modes=["rev"], order=2, rtol={np.float32: 6e-3})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}_impl={}".format(
jit_scan, jit_f, in_axes, scan_name),
"jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes,
"scan": scan_impl}
for jit_scan in [False, True]
for jit_f in [False, True]
for scan_impl, scan_name in SCAN_IMPLS
for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
if in_axes != (None, None))
def testScanVmap(self, jit_scan, jit_f, in_axes, scan):
rng = np.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(scan, static_argnums=(0,))
as_shape = [5, 3]
c_shape = [4]
c_bdim, as_bdim = in_axes
if c_bdim is not None:
c_shape.insert(c_bdim, 7)
if as_bdim is not None:
as_shape.insert(as_bdim, 7)
as_ = rng.randn(*as_shape)
c = rng.randn(*c_shape)
ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=1e-5, atol=1e-5)
def testScanVmapTuples(self):
def f(c, a):
a1, a2 = a
c1, c2 = c
b = jnp.sum(jnp.cos(a1)) * jnp.sum(jnp.tan(c2 * a2))
c = c1 * jnp.sin(jnp.sum(a1 * a2)), c2 * jnp.cos(jnp.sum(a1))
return c, b
in_axes = (0, (1, 2))
r = np.random.RandomState(0)
as_ = (r.randn(3, 7), r.randn(3, 4, 7))
c = (r.randn(7, 2), r.randn(7))
expected_c_out, expected_bs = [], []
for i in range(7):
c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
expected_c_out.append(c_out)
expected_bs.append(bs)
expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
expected_c_out = (jnp.stack(expected_c_out_0), jnp.stack(expected_c_out_1))
expected_bs = jnp.stack(expected_bs)
expected = expected_c_out, expected_bs
ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
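# The scan body rotates the carry one slot per step, so batching a single
# carry element forces the batching rule's fixpoint to propagate the batch
# dimension through every carry position.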
def testScanVmapFixpoint(self):
def f(carry_init):
def scan_body(c, x):
return ((c[1], c[2], c[3], 0.), None)
return lax.scan(scan_body, (0., 1., 2., carry_init), jnp.zeros(2))
carry_init = jnp.array([3., 4., 5.])
carry_out, _ = api.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], jnp.array([0., 0., 0.]), check_dtypes=False)
self.assertAllClose(carry_out[2], jnp.array([0., 0., 0.]), check_dtypes=False)
self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
self.assertAllClose(carry_out[0], jnp.array([2., 2., 2.]), check_dtypes=False)
def testIssue757(self):
def fn(a):
return jnp.cos(a)
def loop(val):
iterations = 10
def apply_carry(x, i):
return api.grad(fn, argnums=(0,))(x)[0], i
final_val, _ = lax.scan(
apply_carry,
val,
jnp.arange(iterations)
)
return final_val
arg = 0.5
api.jit(api.jacfwd(loop, argnums=(0,)))(arg)
def testIssue804(self):
num_devices = xla_bridge.device_count()
f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i"), c), 0.)
api.pmap(f, axis_name="i")(jnp.ones((num_devices, 4))) # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = jnp.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected)
def testMapEmpty(self):
ans = lax.map(lambda x: x * x, jnp.array([]))
expected = jnp.array([])
self.assertAllClose(ans, expected)
def testCaching(self):
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testCaching2(self):
# This second caching test covers a kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare jaxprs for equality (including literals).
raise SkipTest("not implemented")
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ())
self.assertEqual(out, ())
@parameterized.named_parameters(
{"testcase_name": "_jit_loop={}_jit_body={}_jit_cond={}".format(
jit_loop, jit_body, jit_cond),
"jit_loop": jit_loop, "jit_body": jit_body, "jit_cond": jit_cond}
for jit_loop in [False, True]
for jit_body in [False, True]
for jit_cond in [False, True])
def testWhileJVP(self, jit_loop=True, jit_body=False, jit_cond=True):
cond = lambda x: x[0, 2] <= 8
body = lambda x: x * x
if jit_cond:
cond = api.jit(cond)
if jit_body:
body = api.jit(body)
loop = partial(lax.while_loop, cond, body)
if jit_loop:
loop = api.jit(loop)
loop_ref = partial(while_loop_reference, cond, body)
x = jnp.arange(9.).reshape((3, 3))
ans = api.jvp(loop, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop, (x,), order=2, modes=["fwd"])
def testWhileJVPViaForiLoop(self):
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * 2, x)
self.assertAllClose(f(2.), 16., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (16., 8.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
f = lambda x: lax.fori_loop(0, 3, lambda i, x: x * (i + 1), x)
self.assertAllClose(f(2.), 12., check_dtypes=False)
self.assertAllClose(api.jvp(f, (2.,), (1.,)), (12., 6.), check_dtypes=False)
jtu.check_grads(f, (2.,), order=2, modes=["fwd"])
def testWhileJVPWithGrowingNonzeroTangents(self):
rng = np.random.RandomState(0)
def cond(state):
i, x, y, z = state
return i < 2
def body(state):
i, x, y, z = state
y = x * x
z = y * y
return i + 1, x, y, z
y, z = rng.randn(2), rng.randn(2)
def loop(loop_impl, x):
return loop_impl(cond, body, (0, x, y, z))[1]
loop_lax = partial(loop, lax.while_loop)
loop_ref = partial(loop, while_loop_reference)
x = rng.randn(2)
ans = api.jvp(loop_lax, (x,), (x,))
expected = api.jvp(loop_ref, (x,), (x,))
self.assertAllClose(ans, expected, check_dtypes=False)
jtu.check_grads(loop_lax, (x,), order=2, modes=["fwd"])
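# Reverse-mode differentiation of while_loop is unsupported: the trip count
# is data-dependent, so there is no fixed structure to transpose. Forward
# mode and linearization still work, as checked below.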
@parameterized.named_parameters(
dict(testcase_name="_loop={}".format(loop), loop=loop)
for loop in ["while", "fori", "fori_inside_cond", "fori_inside_scan"])
def testWhileGradError(self, loop: str = "fori_inside_scan"):
# Raise error for vjp for loops
if loop == "while":
func = lambda x: lax.while_loop(lambda i: i < 5., lambda i: i + 1., x)
elif loop == "fori":
func = lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x)
elif loop == "fori_inside_jit":
func = api.jit(lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x))
elif loop == "fori_inside_cond":
func = lambda x: lax.cond(True, x,
lambda x: lax.fori_loop(x, x + 2., lambda i, c: c, x),
1., lambda x: x)
elif loop == "fori_inside_scan":
func = lambda x: lax.scan(lambda c, x: (lax.fori_loop(x, x + 2., lambda i, c1: c1 * c, x),
None),
x, np.ones(2))[0]
else:
assert False
with self.assertRaisesRegex(ValueError, "Reverse-mode differentiation does not work for lax.while_loop"):
api.grad(func)(1.)
api.linearize(func, 1.) # Linearization works
def testIssue1316(self):
def f(carry, _):
c, key = carry
key, _ = random.split(key)
return (c, key), ()
key = random.PRNGKey(0)
api.grad(lambda c: lax.scan(f, (c, key), np.ones(3))[0][0])(0.) # doesn't crash
def testIssue1361(self):
@api.jit
def jit_run_scan(x):
def fun(carry, _):
x, _ = carry
return (2 * x, 0.), None
(x, _), _ = lax.scan(fun, (x, 0.), jnp.arange(3))
return x
api.grad(lambda x: jit_run_scan(x))(0.)
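# lax.custom_root(f, x0, solve, tangent_solve) differentiates implicitly
# through a root of f: gradients use tangent_solve on the linearized system
# instead of differentiating through the iterations of `solve`.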
def test_custom_root_scalar(self):
def scalar_solve(f, y):
return y / f(1.0)
def binary_search(func, x0, low=0.0, high=100.0):
del x0 # unused
def cond(state):
low, high = state
midpoint = 0.5 * (low + high)
return (low < midpoint) & (midpoint < high)
def body(state):
low, high = state
midpoint = 0.5 * (low + high)
update_upper = func(midpoint) > 0
low = jnp.where(update_upper, low, midpoint)
high = jnp.where(update_upper, midpoint, high)
return (low, high)
solution, _ = lax.while_loop(cond, body, (low, high))
return solution
def sqrt_cubed(x, tangent_solve=scalar_solve):
f = lambda y: y ** 2 - x ** 3
return lax.custom_root(f, 0.0, binary_search, tangent_solve)
value, grad = api.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
self.assertAllClose(grad, api.grad(pow)(5.0, 1.5), check_dtypes=False,
rtol=1e-7)
jtu.check_grads(sqrt_cubed, (5.0,), order=2,
rtol={jnp.float32: 1e-2, jnp.float64: 1e-3})
inputs = jnp.array([4.0, 5.0])
results = api.vmap(sqrt_cubed)(inputs)
self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
results = api.jit(sqrt_cubed)(5.0)
self.assertAllClose(results, 5.0 ** 1.5, check_dtypes=False,
rtol={np.float64: 1e-7})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_root_vector_with_solve_closure(self):
def vector_solve(f, y):
return jnp.linalg.solve(api.jacobian(f)(y), y)
def linear_solve(a, b):
f = lambda y: high_precision_dot(a, y) - b
x0 = jnp.zeros_like(b)
solution = jnp.linalg.solve(a, b)
oracle = lambda func, x0: solution
return lax.custom_root(f, x0, oracle, vector_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(linear_solve, (a, b), order=2,
atol={np.float32: 1e-2, np.float64: 1e-11})
actual = api.jit(linear_solve)(a, b)
expected = jnp.linalg.solve(a, b)
self.assertAllClose(expected, actual)
def test_custom_root_with_custom_linear_solve(self):
def linear_solve(a, b):
f = lambda x: high_precision_dot(a, x) - b
factors = jsp.linalg.cho_factor(a)
cho_solve = lambda f, b: jsp.linalg.cho_solve(factors, b)
def pos_def_solve(g, b):
return lax.custom_linear_solve(g, b, cho_solve, symmetric=True)
return lax.custom_root(f, b, cho_solve, pos_def_solve)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
actual = linear_solve(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
actual = api.jit(linear_solve)(high_precision_dot(a, a.T), b)
expected = jnp.linalg.solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual)
jtu.check_grads(lambda x, y: linear_solve(high_precision_dot(x, x.T), y),
(a, b), order=2, rtol={jnp.float32: 1e-2})
def test_custom_root_errors(self):
with self.assertRaisesRegex(TypeError, re.escape("f() output pytree")):
lax.custom_root(lambda x: (x, x), 0.0, lambda f, x: x, lambda f, x: x)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_root(lambda x: x, 0.0, lambda f, x: (x, x), lambda f, x: x)
def dummy_root_usage(x):
f = lambda y: x - y
return lax.custom_root(f, 0.0, lambda f, x: x, lambda f, x: (x, x))
with self.assertRaisesRegex(
TypeError, re.escape("tangent_solve() output pytree")):
api.jvp(dummy_root_usage, (0.0,), (0.0,))
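# lax.custom_linear_solve(matvec, b, solve, transpose_solve) wraps a linear
# solve so that JVPs reuse `solve` and VJPs reuse `transpose_solve` (or
# `solve` itself when symmetric=True), rather than differentiating through
# the solver internals.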
@parameterized.named_parameters(
{"testcase_name": "nonsymmetric", "symmetric": False},
{"testcase_name": "symmetric", "symmetric": True},
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve(self, symmetric):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(
matvec, b, explicit_jacobian_solve, explicit_jacobian_solve,
symmetric=symmetric)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
if symmetric:
a = a + a.T
b = rng.randn(3)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
expected = jnp.linalg.solve(a, b)
actual = api.jit(linear_solve)(a, b)
self.assertAllClose(expected, actual)
c = rng.randn(3, 2)
expected = jnp.linalg.solve(a, c)
actual = api.vmap(linear_solve, (None, 1), 1)(a, c)
self.assertAllClose(expected, actual)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_zeros(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, explicit_jacobian_solve,
explicit_jacobian_solve)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
jtu.check_grads(lambda x: linear_solve(x, b), (a,), order=2,
rtol={np.float32: 5e-3})
jtu.check_grads(lambda x: linear_solve(a, x), (b,), order=2,
rtol={np.float32: 5e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_iterative(self):
def richardson_iteration(matvec, b, omega=0.1, tolerance=1e-6):
# Equivalent to vanilla gradient descent:
# https://en.wikipedia.org/wiki/Modified_Richardson_iteration
def cond(x):
return jnp.linalg.norm(matvec(x) - b) > tolerance
def body(x):
return x + omega * (b - matvec(x))
return lax.while_loop(cond, body, b)
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, richardson_iteration,
richardson_iteration)
def build_and_solve(a, b):
# intentionally non-linear in a and b
matvec = partial(high_precision_dot, jnp.exp(a))
return matrix_free_solve(matvec, jnp.cos(b))
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(jnp.exp(a), jnp.cos(b))
actual = build_and_solve(a, b)
self.assertAllClose(expected, actual, atol=1e-5)
jtu.check_grads(build_and_solve, (a, b), atol=1e-5, order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
# vmap across an empty dimension
jtu.check_grads(
api.vmap(build_and_solve), (a[None, :, :], b[None, :]),
atol=1e-5,
order=2,
rtol={jnp.float32: 6e-2, jnp.float64: 2e-3})
def test_custom_linear_solve_cholesky(self):
def positive_definite_solve(a, b):
factors = jsp.linalg.cho_factor(a)
def solve(matvec, x):
return jsp.linalg.cho_solve(factors, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, symmetric=True)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = jnp.linalg.solve(np.asarray(posify(a)), b)
actual = positive_definite_solve(posify(a), b)
self.assertAllClose(expected, actual)
actual = api.jit(positive_definite_solve)(posify(a), b)
self.assertAllClose(expected, actual)
# numerical gradients are only well defined if ``a`` is guaranteed to be
# positive definite.
jtu.check_grads(
lambda x, y: positive_definite_solve(posify(x), y),
(a, b), order=2, rtol=1e-2)
def test_custom_linear_solve_complex(self):
def solve(a, b):
def solve(matvec, x):
return jsp.linalg.solve(a, x)
def tr_solve(matvec, x):
return jsp.linalg.solve(a.T, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, tr_solve)
rng = np.random.RandomState(0)
a = 0.5 * rng.randn(2, 2) + 0.5j * rng.randn(2, 2)
b = 0.5 * rng.randn(2) + 0.5j * rng.randn(2)
jtu.check_grads(solve, (a, b), order=2, rtol=1e-2)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_lu(self):
def linear_solve(a, b):
a_factors = jsp.linalg.lu_factor(a)
at_factors = jsp.linalg.lu_factor(a.T)
def solve(matvec, x):
return jsp.linalg.lu_solve(a_factors, x)
def transpose_solve(vecmat, x):
return jsp.linalg.lu_solve(at_factors, x)
return lax.custom_linear_solve(
partial(high_precision_dot, a), b, solve, transpose_solve)
rng = np.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
expected = jnp.linalg.solve(a, b)
actual = linear_solve(a, b)
self.assertAllClose(expected, actual)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
# regression test for https://github.com/google/jax/issues/1536
jtu.check_grads(api.jit(linear_solve), (a, b), order=2,
rtol={np.float32: 2e-3})
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_without_transpose_solve(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(jnp.linalg.solve(api.jacobian(matvec)(b), b))
def loss(a, b):
matvec = partial(high_precision_dot, a)
x = lax.custom_linear_solve(matvec, b, explicit_jacobian_solve)
return jnp.sum(x)
rng = np.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(loss, (a, b), order=2, modes=['fwd'],
atol={np.float32: 2e-3, np.float64: 1e-11})
jtu.check_grads(api.vmap(loss), (a[None,:,:], b[None,:]), order=2,
modes=['fwd'], atol={np.float32: 2e-3, np.float64: 1e-11})
with self.assertRaisesRegex(TypeError, "transpose_solve required"):
api.grad(loss)(a, b)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def test_custom_linear_solve_pytree(self):
def unrolled_matvec(mat, x):
result = []
for i in range(len(mat)):
v = 0
for j in range(len(x)):
if mat[i][j] is not None:
v += mat[i][j] * x[j]
result.append(v)
return result
def unrolled_substitution_solve(matvec, b, lower_tri):
zero = jnp.zeros(())
one = jnp.ones(())
x = [zero for _ in b]
ordering = range(len(b)) if lower_tri else range(len(b) - 1, -1, -1)
for i in ordering:
residual = b[i] - matvec(x)[i]
diagonal = matvec([one if i == j else zero for j in range(len(b))])[i]
x[i] = residual / diagonal
return x
def custom_unrolled_lower_tri_solve(mat, b):
return lax.custom_linear_solve(
partial(unrolled_matvec, mat), b,
partial(unrolled_substitution_solve, lower_tri=True),
partial(unrolled_substitution_solve, lower_tri=False))
mat = [[1.0, None, None, None, None, None, None],
[1.0, 1.0, None, None, None, None, None],
[None, 1.0, 1.0, None, None, None, None],
[None, None, 1.0, 1.0, None, None, None],
[None, None, None, 1.0, 1.0, None, None],
[None, None, None, None, None, 2.0, None],
[None, None, None, None, None, 4.0, 3.0]]
rng = np.random.RandomState(0)
b = list(rng.randn(7))
# Non-batched
jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2,
rtol={jnp.float32: 2e-2})
# Batch one element of b (which, because of unrolling, should only affect
# the first block of outputs)
b_bat = list(b)
b_bat[3] = rng.randn(3)
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(None, [None, None, None, 0, None, None, None]),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat),
order=2,
rtol={jnp.float32: 1e-2})
# Batch one element of mat (again only affecting first block)
mat[2][1] = rng.randn(3)
mat_axis_tree = [
[0 if i == 2 and j == 1 else None for j in range(7)] for i in range(7)
]
jtu.check_grads(
api.vmap(
custom_unrolled_lower_tri_solve,
in_axes=(mat_axis_tree, None),
out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b),
order=2)
def test_custom_linear_solve_errors(self):
solve = lambda f, x: x
with self.assertRaisesRegex(TypeError, re.escape("matvec() output pytree")):
lax.custom_linear_solve(lambda x: [x], 1.0, solve, solve)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: [x], solve)
with self.assertRaisesRegex(
TypeError, re.escape("transpose_solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, solve, lambda f, x: [x])
with self.assertRaisesRegex(ValueError, re.escape("solve() output shapes")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: jnp.ones(2), solve)
def bad_matvec_usage(a):
return lax.custom_linear_solve(
lambda x: a * jnp.ones(2), 1.0, solve, solve)
with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")):
api.jvp(bad_matvec_usage, (1.0,), (1.0,))
def testIssue810(self):
def loss(A):
def step(x, i):
return jnp.matmul(A, x), None
init_x = jnp.zeros(A.shape[-1:])
last_x, _ = lax.scan(step, init_x, jnp.arange(10))
return jnp.sum(last_x)
A = jnp.zeros((3, 3))
# The second DUS was unnecessarily replicating A across time.
# We check XLA because _scan_impl is "underneath" the jaxpr language.
s = str(api.xla_computation(api.grad(loss))(A).as_hlo_text())
assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = np.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap(self):
# code from jsnoek@
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x) # noqa: F821
ans = f_loop(jnp.ones(api.device_count()))
del body, f_loop
def body2(i, x):
result = jnp.broadcast_to(jnp.sin(x).sum(), x.shape)
return result + x
g_loop = lambda x: lax.fori_loop(0, 3, body2, x)
expected = g_loop(jnp.ones(api.device_count()))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_while_loop_of_pmap_error_message(self):
def body(i, x):
result = api.pmap(lambda z: lax.psum(jnp.sin(z), 'i'), axis_name='i')(x)
return result + x
f_loop = lambda x: lax.fori_loop(0, 3, body, x)
too_big = 2 * api.device_count()
self.assertRaisesRegex(
ValueError,
re.escape(
"compiling a primitive computation `while` that requires {} "
"replicas, but only {} XLA devices are available on backend {}."
.format(too_big, api.device_count(), jtu.device_under_test())),
lambda: f_loop(jnp.ones(too_big)))
@parameterized.named_parameters(
{"testcase_name": "_{}".format(scan_name),
"scan": scan_impl}
for scan_impl, scan_name in SCAN_IMPLS)
def test_scan_reverse(self, scan):
def cumsum(x, reverse):
return scan(lambda c, x: (c + x, c + x), 0, x, reverse=reverse)[1]
x = np.array([3, 1, 4, 1, 5, 9])
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x), cumsum(x, False), check_dtypes=False)
with api.disable_jit():
self.assertAllClose(np.cumsum(x[::-1])[::-1], cumsum(x, True), check_dtypes=False)
def test_scan_unroll(self):
d = jnp.ones(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = jnp.cos(jnp.sum(jnp.sin(a)) + jnp.sum(jnp.cos(c)) + jnp.sum(jnp.tan(d)))
c = jnp.sin(c * b)
assert b.shape == ()
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan = lambda c, xs: lax.scan(f, c, xs)
scan_unrolled = lambda c, xs: lax.scan(f, c, xs, unroll=2)
# jaxprs should be the same size
self.assertEqual(
len(str(api.make_jaxpr(scan)(c, xs))),
len(str(api.make_jaxpr(scan_unrolled)(c, xs))))
# but HLO should grow due to unrolling
self.assertLess(
len(str(api.xla_computation(scan)(c, xs).as_hlo_text())),
len(str(api.xla_computation(scan_unrolled)(c, xs).as_hlo_text())))
def test_disable_jit_cond_with_vmap(self):
# https://github.com/google/jax/issues/3093
def fn(t):
return lax.cond(t > 0, 0, lambda x: 0, 0, lambda x: 1)
fn = api.vmap(fn)
with api.disable_jit():
_ = fn(jnp.array([1])) # doesn't crash
def test_disable_jit_while_loop_with_vmap(self):
def trivial_while(y):
return lax.while_loop(lambda x: x < 10.0, lambda x: x + 1.0, y)
with api.disable_jit():
api.vmap(trivial_while)(jnp.array([3.0,4.0]))
def test_vmaps_of_while_loop(self):
# https://github.com/google/jax/issues/3164
def f(x, n): return lax.fori_loop(0, n, lambda _, x: x + 1, x)
x, n = jnp.arange(3), jnp.arange(4)
api.vmap(api.vmap(f, (None, 0)), (0, None))(x, n) # doesn't crash
@parameterized.named_parameters(
{"testcase_name": f"_{shape}_axis={axis}",
"shape": shape, "axis": axis}
for shape in [
[0], [1], [2], [3], [5], [10], [1000],
[2, 3], [7, 5], [5, 6, 7]
]
for axis in range(-len(shape), len(shape) - 1))
def testAssociativeScanUnstructured(self, shape, axis):
data = np.arange(np.prod(shape)).reshape(shape) + 7
expected = np.cumsum(data, axis=axis)
result = lax.associative_scan(operator.add, data, axis=axis)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanUnstructured1000Reverse(self):
data = np.arange(1000) + 32
expected = np.cumsum(data[::-1])[::-1]
result = lax.associative_scan(operator.add, data, reverse=True)
self.assertAllClose(result, expected, check_dtypes=False)
def testAssociativeScanStructured3(self):
pair = collections.namedtuple('pair', ('first', 'second'))
data = pair(first=np.array([0., 1., 2.]),
second=np.array([0., 10., 20.]))
def fn(a, b):
return pair(first=a.first + b.first,
second=a.second + b.second)
result = lax.associative_scan(fn, elems=data)
self.assertAllClose(result.first, np.array([0., 1., 3.]),
check_dtypes=False)
self.assertAllClose(result.second, np.array([0., 10., 30.]),
check_dtypes=False)
def test_scan_typecheck_param(self):
d = jnp.ones(2)
def f(c, a):
b = jnp.cos(jnp.sum(a) + jnp.sum(c) + jnp.sum(d))
c = jnp.sin(c * b)
return c, b
xs = jnp.ones((5, 3))
c = jnp.ones(4)
scan_fun = lambda c, xs: lax.scan(f, c, xs)
def new_jaxpr():
jaxpr = api.make_jaxpr(scan_fun)(c, xs).jaxpr
scan = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'scan')
return jaxpr, scan
jaxpr, eqn = new_jaxpr()
eqn.params['reverse'] = 4
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param reverse of type int, bool required: 4'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['num_consts'] = -3
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid scan param num_consts of type int, '
'non-negative int required: -3'),
lambda: core.check_jaxpr(jaxpr))
def test_cond_typecheck_param(self):
def new_jaxpr():
jaxpr = api.make_jaxpr(
lambda x: lax.switch(0, [jnp.sin, jnp.cos], x))(1.).jaxpr
cond = next(eqn for eqn in jaxpr.eqns if eqn.primitive.name == 'cond')
return jaxpr, cond
jaxpr, eqn = new_jaxpr()
eqn.params['branches'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param branches of type tuple, '
'tuple of ClosedJaxpr required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = (4, 2)
self.assertRaisesRegex(
core.JaxprTypeError,
re.escape('invalid cond param linear of type tuple, '
'tuple of bool required: (4, 2)'),
lambda: core.check_jaxpr(jaxpr))
jaxpr, eqn = new_jaxpr()
eqn.params['linear'] = 'multi\nline'
self.assertRaisesRegex(
core.JaxprTypeError,
r'invalid cond param linear of type str, '
r'tuple of bool required:\nmulti\nline',
lambda: core.check_jaxpr(jaxpr))
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_scan_init_weak_type(self, dtype):
def func(carry, x):
return carry + x, x
init_weak = 0
x = jnp.ones(5, dtype=dtype)
carry, result = lax.scan(func, init_weak, x)
self.assertEqual(carry, x.sum())
self.assertArraysEqual(result, x)
@parameterized.named_parameters(
{"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
for dtype in jtu.dtypes.all_integer)
def test_while_loop_init_weak_type(self, dtype):
def cond_fun(val):
return val < 2
def body_fun(val):
return val + increment
increment = jnp.array(1, dtype=dtype)
init_weak = 0
result = lax.while_loop(cond_fun, body_fun, init_weak)
self.assertArraysEqual(result, jnp.full_like(increment, 2))
def test_scan_vjp_forwards_extensive_residuals(self):
def cumprod(x):
s = jnp.ones((2, 32), jnp.float32)
return lax.scan(lambda s, x: (x*s, s), s, x)
rng = np.random.RandomState(1234)
x = jnp.asarray(rng.randn(32, 2, 32).astype('float32'))
_, vjp_fun = api.vjp(cumprod, x)
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIs(ext_res, x)
x = rng.randn(32, 2, 32).astype('float32')
_, vjp_fun = api.vjp(cumprod, x)
*_, ext_res = vjp_fun.args[0].args[0]
self.assertIsInstance(ext_res, xla.DeviceArray)
def test_scan_vmap_collectives(self):
def scan_f(state, x):
s = lax.psum(state, 'i') * x
return state, s
def scan(state, xs):
return lax.scan(scan_f, state, xs)
scan_v = api.vmap(scan, in_axes=0, out_axes=0, axis_name='i')
self.assertAllClose(
scan_v(jnp.ones([1]), jnp.arange(5).reshape((1, 5))),
(jnp.array([1.]), jnp.array([[0., 1., 2., 3., 4.]])))
def test_xla_cpu_gpu_loop_cond_bug(self):
def deriv(f):
return lambda x, *args: jax.linearize(lambda x: f(x, *args), x)[1](1.0)
def _while_loop(cond_fun, body_fun, init_val, max_iter):
def _iter(val):
next_val = body_fun(val)
next_cond = True
return next_val, next_cond
def _fun(tup, _):
val, cond = tup
return jax.lax.cond(cond, _iter, lambda x: (x, False), val), _
init = (init_val, cond_fun(init_val))
return jax.lax.scan(_fun, init, None, length=max_iter)[0][0]
def my_pow(x, y):
def body_fun(val):
return val * x
def cond_fun(val):
return True
return _while_loop(cond_fun, body_fun, 1.0, y)
self.assertAllClose(deriv(my_pow)(3.0, 1), 1.0, check_dtypes=False)
def test_unexpected_tracer_error(self):
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by while_loop"):
lst = []
def side_effecting_body(val):
lst.append(val)
return val+1
lax.while_loop(lambda x: x < 2, side_effecting_body, 1)
lst[0] += 1
with self.assertRaisesRegex(core.UnexpectedTracerError,
"transformed by scan"):
lst = []
def side_effecting_scan(carry, val):
lst.append(val)
return carry, val+1
lax.scan(side_effecting_scan, None, jnp.ones((2, 2)))
lst[0] += 1
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| true
| true
|
790702cdce5fb7ac74a8a831cfb82e00313e3d81
| 907
|
py
|
Python
|
mysite/blog/migrations/0001_initial.py
|
uzzal71/Django_blog
|
096c0bb0057cc593a10eeff2ef1afecd7a6c1cf3
|
[
"MIT"
] | 1
|
2019-01-16T05:05:21.000Z
|
2019-01-16T05:05:21.000Z
|
mysite/blog/migrations/0001_initial.py
|
uzzal71/Django_blog
|
096c0bb0057cc593a10eeff2ef1afecd7a6c1cf3
|
[
"MIT"
] | null | null | null |
mysite/blog/migrations/0001_initial.py
|
uzzal71/Django_blog
|
096c0bb0057cc593a10eeff2ef1afecd7a6c1cf3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.4 on 2018-12-28 02:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.275862
| 120
| 0.637266
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
790704990ad2011fcbab4c18a03a039065742810
| 1,031
|
py
|
Python
|
config_manager/namespace.py
|
tbeckham/DeploymentManager
|
c1b2ba47d1732859ff458eb934da671fb0dad37f
|
[
"Apache-2.0"
] | null | null | null |
config_manager/namespace.py
|
tbeckham/DeploymentManager
|
c1b2ba47d1732859ff458eb934da671fb0dad37f
|
[
"Apache-2.0"
] | null | null | null |
config_manager/namespace.py
|
tbeckham/DeploymentManager
|
c1b2ba47d1732859ff458eb934da671fb0dad37f
|
[
"Apache-2.0"
] | null | null | null |
# __author__ = 'clarkmatthew'
#
import json
class Namespace(object):
"""
Convert dict (if provided) into attributes and return a somewhat
generic object
"""
def __init__(self, newdict=None):
if newdict:
for key in newdict:
value = newdict[key]
try:
if isinstance(value, dict):
                        setattr(self, key, Namespace(value))
else:
setattr(self, key, value)
except:
print '"{0}" ---> "{1}" , type: "{2}"'.format(key,
value,
type(value))
raise
def _get_keys(self):
return vars(self).keys()
def _to_json(self):
return json.dumps(self,
default=lambda o: o.__dict__,
sort_keys=True,
indent=4)
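# A minimal usage sketch (not part of the original module; assumes the
# corrected setattr above so nested dicts become nested Namespace objects):
# cfg = Namespace({'db': {'host': 'localhost', 'port': 5432}, 'debug': True})
# cfg.debug          # -> True
# cfg.db.host        # -> 'localhost'
# cfg._get_keys()    # -> ['db', 'debug']
# print cfg._to_json()   # Python 2 print, matching the module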
| 30.323529
| 78
| 0.402522
|
import json
class Namespace(object):
"""
Convert dict (if provided) into attributes and return a somewhat
generic object
"""
def __init__(self, newdict=None):
if newdict:
for key in newdict:
value = newdict[key]
try:
if isinstance(value, dict):
                        setattr(self, key, Namespace(value))
else:
setattr(self, key, value)
except:
print '"{0}" ---> "{1}" , type: "{2}"'.format(key,
value,
type(value))
raise
def _get_keys(self):
return vars(self).keys()
def _to_json(self):
return json.dumps(self,
default=lambda o: o.__dict__,
sort_keys=True,
indent=4)
| false
| true
|
7907054ceea320e935931e7cb27969d1b35a9ad4
| 520
|
py
|
Python
|
Python 基础教程/1.5.2 商品买卖练习2.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | 95
|
2020-10-11T04:45:46.000Z
|
2022-02-25T01:50:40.000Z
|
Python 基础教程/1.5.2 商品买卖练习2.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | null | null | null |
Python 基础教程/1.5.2 商品买卖练习2.py
|
shao1chuan/pythonbook
|
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
|
[
"MulanPSL-1.0"
] | 30
|
2020-11-05T09:01:00.000Z
|
2022-03-08T05:58:55.000Z
|
# List exercise: write a loop that keeps asking the user what they want to buy.
# The user picks an item; that item is added to the shopping cart. When the user enters q to quit, print the contents of the cart.
l1 = [['a',23],['b',34],['c',33],['d',345]]
l2 = []
print("Product list****************")
for i in l1:
    print(f"Product {i[0]}, price {i[1]}")
while True:
    name = input("Enter a product name: ")
    if name != "q":
        for bb in l1:
            if name == bb[0]:
                print(f"You selected {name}")
                l2.append(bb)
                break
        else:
            print("The item you entered is not in the list")
    else:
        break
if len(l2) > 0:
    print(f"Your selected products: {l2}")
| 20
| 43
| 0.471154
|
l1 = [['a',23],['b',34],['c',33],['d',345]]
l2 = []
print("Product list****************")
for i in l1:
    print(f"Product {i[0]}, price {i[1]}")
while True:
    name = input("Enter a product name: ")
    if name != "q":
        for bb in l1:
            if name == bb[0]:
                print(f"You selected {name}")
                l2.append(bb)
                break
        else:
            print("The item you entered is not in the list")
    else:
        break
if len(l2) > 0:
    print(f"Your selected products: {l2}")
| true
| true
|
79070565a1f8768ecb84f605b1862504cc489825
| 957
|
py
|
Python
|
plotly/validators/sankey/__init__.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/sankey/__init__.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 1
|
2020-12-15T16:56:11.000Z
|
2020-12-15T16:56:11.000Z
|
plotly/validators/sankey/__init__.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
from ._visible import VisibleValidator
from ._valuesuffix import ValuesuffixValidator
from ._valueformat import ValueformatValidator
from ._uid import UidValidator
from ._textfont import TextfontValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._orientation import OrientationValidator
from ._opacity import OpacityValidator
from ._node import NodeValidator
from ._name import NameValidator
from ._link import LinkValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._domain import DomainValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._arrangement import ArrangementValidator
| 39.875
| 52
| 0.879833
|
from ._visible import VisibleValidator
from ._valuesuffix import ValuesuffixValidator
from ._valueformat import ValueformatValidator
from ._uid import UidValidator
from ._textfont import TextfontValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._orientation import OrientationValidator
from ._opacity import OpacityValidator
from ._node import NodeValidator
from ._name import NameValidator
from ._link import LinkValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._domain import DomainValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._arrangement import ArrangementValidator
| true
| true
|
79070580cee376e382750abdbf7e53085f78557b
| 4,618
|
py
|
Python
|
opensanctions/crawlers/us_trade_csl.py
|
sanktio/opensanctions
|
318f54775b333fefb79e002042e6564b6a4fa5bc
|
[
"MIT"
] | 79
|
2021-02-04T11:20:43.000Z
|
2022-01-27T12:04:48.000Z
|
opensanctions/crawlers/us_trade_csl.py
|
sanktio/opensanctions
|
318f54775b333fefb79e002042e6564b6a4fa5bc
|
[
"MIT"
] | 101
|
2021-02-12T18:26:16.000Z
|
2022-01-27T14:01:53.000Z
|
opensanctions/crawlers/us_trade_csl.py
|
sanktio/opensanctions
|
318f54775b333fefb79e002042e6564b6a4fa5bc
|
[
"MIT"
] | 21
|
2021-02-02T12:59:08.000Z
|
2022-01-25T15:03:43.000Z
|
import json
from banal import ensure_list
from functools import lru_cache
from pantomime.types import JSON
from requests.exceptions import TooManyRedirects
from opensanctions.core import Dataset
from opensanctions import helpers as h
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
SDN = Dataset.require("us_ofac_sdn")
@lru_cache(maxsize=None)
def deref_url(context, url):
try:
res = context.http.get(url, stream=True)
return res.url
except TooManyRedirects:
return url
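# deref_url memoizes redirect resolution per (context, url) via lru_cache, so
# repeated source URLs cost a single HTTP round-trip; on TooManyRedirects it
# falls back to the original URL rather than aborting the crawl.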
def parse_result(context, result):
type_ = result.pop("type", None)
schema = context.lookup_value("type", type_)
if schema is None:
context.log.error("Unknown result type", type=type_)
return
entity = context.make(schema)
entity.id = context.make_slug(result.pop("id"))
entity_number = result.pop("entity_number", None)
if entity_number is not None:
assert int(entity_number)
entity.id = SDN.make_slug(entity_number)
    name = result.pop("name", None)
    if name is not None:
        name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
    entity.add("name", name)
for alias in ensure_list(result.pop("alt_names", "")):
entity.add("alias", alias.split("; "))
entity.add("notes", result.pop("remarks", None))
entity.add("country", result.pop("country", None))
if entity.schema.is_a("Person"):
entity.add("position", result.pop("title", None))
entity.add("nationality", result.pop("nationalities", None))
entity.add("nationality", result.pop("citizenships", None))
for dob in result.pop("dates_of_birth", []):
entity.add("birthDate", h.parse_date(dob, FORMATS))
entity.add("birthPlace", result.pop("places_of_birth", None))
elif entity.schema.is_a("Vessel"):
entity.add("flag", result.pop("vessel_flag", None))
entity.add("callSign", result.pop("call_sign", None))
entity.add("type", result.pop("vessel_type", None))
grt = result.pop("gross_registered_tonnage", None)
entity.add("grossRegisteredTonnage", grt)
gt = result.pop("gross_tonnage", None)
entity.add("tonnage", gt)
# TODO: make adjacent owner entity
result.pop("vessel_owner", None)
assert result.pop("title", None) is None
assert not len(result.pop("nationalities", []))
assert not len(result.pop("citizenships", []))
assert not len(result.pop("dates_of_birth", []))
assert not len(result.pop("places_of_birth", []))
for address in result.pop("addresses", []):
obj = h.make_address(
context,
street=address.get("address"),
city=address.get("city"),
postal_code=address.get("postal_code"),
region=address.get("state"),
country=address.get("country"),
)
h.apply_address(context, entity, obj)
for ident in result.pop("ids", []):
country = ident.pop("country")
entity.add("country", country)
h.apply_feature(
context,
entity,
ident.pop("type"),
ident.pop("number"),
country=country,
date_formats=FORMATS,
start_date=ident.pop("issue_date", None),
end_date=ident.pop("expiration_date", None),
)
sanction = context.make("Sanction")
sanction.id = context.make_id(entity.id, "Sanction")
sanction.add("entity", entity)
sanction.add("program", result.pop("programs", []))
sanction.add("status", result.pop("license_policy", []))
sanction.add("reason", result.pop("license_requirement", []))
sanction.add("reason", result.pop("federal_register_notice", None))
sanction.add("startDate", result.pop("start_date", None))
sanction.add("endDate", result.pop("end_date", None))
sanction.add("country", "us")
sanction.add("authority", result.pop("source", None))
# TODO: deref
source_url = deref_url(context, result.pop("source_information_url"))
sanction.add("sourceUrl", source_url)
result.pop("source_list_url")
# TODO: what is this?
result.pop("standard_order", None)
context.emit(sanction)
context.emit(entity, target=True, unique=True)
if len(result):
context.pprint(result)
def crawl(context):
path = context.fetch_resource("source.json", context.dataset.data.url)
context.export_resource(path, JSON, title=context.SOURCE_TITLE)
with open(path, "r") as file:
data = json.load(file)
for result in data.get("results"):
parse_result(context, result)
| 36.078125
| 81
| 0.637289
|
import json
from banal import ensure_list
from functools import lru_cache
from pantomime.types import JSON
from requests.exceptions import TooManyRedirects
from opensanctions.core import Dataset
from opensanctions import helpers as h
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
SDN = Dataset.require("us_ofac_sdn")
@lru_cache(maxsize=None)
def deref_url(context, url):
try:
res = context.http.get(url, stream=True)
return res.url
except TooManyRedirects:
return url
def parse_result(context, result):
type_ = result.pop("type", None)
schema = context.lookup_value("type", type_)
if schema is None:
context.log.error("Unknown result type", type=type_)
return
entity = context.make(schema)
entity.id = context.make_slug(result.pop("id"))
entity_number = result.pop("entity_number", None)
if entity_number is not None:
assert int(entity_number)
entity.id = SDN.make_slug(entity_number)
name = result.pop("name", None)
    if name is not None:
        name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
entity.add("name", name)
for alias in ensure_list(result.pop("alt_names", "")):
entity.add("alias", alias.split("; "))
entity.add("notes", result.pop("remarks", None))
entity.add("country", result.pop("country", None))
if entity.schema.is_a("Person"):
entity.add("position", result.pop("title", None))
entity.add("nationality", result.pop("nationalities", None))
entity.add("nationality", result.pop("citizenships", None))
for dob in result.pop("dates_of_birth", []):
entity.add("birthDate", h.parse_date(dob, FORMATS))
entity.add("birthPlace", result.pop("places_of_birth", None))
elif entity.schema.is_a("Vessel"):
entity.add("flag", result.pop("vessel_flag", None))
entity.add("callSign", result.pop("call_sign", None))
entity.add("type", result.pop("vessel_type", None))
grt = result.pop("gross_registered_tonnage", None)
entity.add("grossRegisteredTonnage", grt)
gt = result.pop("gross_tonnage", None)
entity.add("tonnage", gt)
result.pop("vessel_owner", None)
assert result.pop("title", None) is None
assert not len(result.pop("nationalities", []))
assert not len(result.pop("citizenships", []))
assert not len(result.pop("dates_of_birth", []))
assert not len(result.pop("places_of_birth", []))
for address in result.pop("addresses", []):
obj = h.make_address(
context,
street=address.get("address"),
city=address.get("city"),
postal_code=address.get("postal_code"),
region=address.get("state"),
country=address.get("country"),
)
h.apply_address(context, entity, obj)
for ident in result.pop("ids", []):
country = ident.pop("country")
entity.add("country", country)
h.apply_feature(
context,
entity,
ident.pop("type"),
ident.pop("number"),
country=country,
date_formats=FORMATS,
start_date=ident.pop("issue_date", None),
end_date=ident.pop("expiration_date", None),
)
sanction = context.make("Sanction")
sanction.id = context.make_id(entity.id, "Sanction")
sanction.add("entity", entity)
sanction.add("program", result.pop("programs", []))
sanction.add("status", result.pop("license_policy", []))
sanction.add("reason", result.pop("license_requirement", []))
sanction.add("reason", result.pop("federal_register_notice", None))
sanction.add("startDate", result.pop("start_date", None))
sanction.add("endDate", result.pop("end_date", None))
sanction.add("country", "us")
sanction.add("authority", result.pop("source", None))
source_url = deref_url(context, result.pop("source_information_url"))
sanction.add("sourceUrl", source_url)
result.pop("source_list_url")
result.pop("standard_order", None)
context.emit(sanction)
context.emit(entity, target=True, unique=True)
if len(result):
context.pprint(result)
def crawl(context):
path = context.fetch_resource("source.json", context.dataset.data.url)
context.export_resource(path, JSON, title=context.SOURCE_TITLE)
with open(path, "r") as file:
data = json.load(file)
for result in data.get("results"):
parse_result(context, result)
| true
| true
|
790705dff7658fd0f54dc68f4be233c64ed3d9b8
| 18,305
|
py
|
Python
|
mcfly/modelgen.py
|
wadpac/mcfly
|
c288ba227df0e7423dccde63f9886b025ceec269
|
[
"Apache-2.0"
] | 1
|
2019-05-06T08:26:10.000Z
|
2019-05-06T08:26:10.000Z
|
mcfly/modelgen.py
|
wadpac/mcfly
|
c288ba227df0e7423dccde63f9886b025ceec269
|
[
"Apache-2.0"
] | null | null | null |
mcfly/modelgen.py
|
wadpac/mcfly
|
c288ba227df0e7423dccde63f9886b025ceec269
|
[
"Apache-2.0"
] | 1
|
2020-01-21T15:43:01.000Z
|
2020-01-21T15:43:01.000Z
|
#
# mcfly
#
# Copyright 2017 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM  # comment this import out on HPC (no GPU/CuDNN)
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'],
model_type=None,
cnn_min_layers=5, cnn_max_layers=10,
cnn_min_filters=25, cnn_max_filters=100,
cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
"""
Generate one or multiple untrained Keras models with random hyperparameters.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
number_of_classes : int
Number of classes for classification task
number_of_models : int
Number of models to generate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
model_type : str, optional
Type of model to build: 'CNN' or 'DeepConvLSTM'.
Default option None generates both models.
    cnn_min_layers : int
        minimum number of Conv layers in CNN model
    cnn_max_layers : int
        maximum number of Conv layers in CNN model
cnn_min_filters : int
minimum number of filters per Conv layer in CNN model
cnn_max_filters : int
maximum number of filters per Conv layer in CNN model
cnn_min_fc_nodes : int
minimum number of hidden nodes per Dense layer in CNN model
cnn_max_fc_nodes : int
maximum number of hidden nodes per Dense layer in CNN model
deepconvlstm_min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
    deepconvlstm_min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    deepconvlstm_max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
deepconvlstm_min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
deepconvlstm_max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
models : list
List of compiled models
"""
models = []
for _ in range(0, number_of_models):
if model_type is None: # random model choice:
current_model_type = 'CNN' if np.random.random(
) < 0.5 else 'DeepConvLSTM'
else: # user-defined model choice:
current_model_type = model_type
generate_model = None
if current_model_type == 'CNN':
generate_model = generate_CNN_model # generate_model is a function
hyperparameters = generate_CNN_hyperparameter_set(
min_layers=cnn_min_layers, max_layers=cnn_max_layers,
min_filters=cnn_min_filters, max_filters=cnn_max_filters,
min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
if current_model_type == 'DeepConvLSTM':
generate_model = generate_DeepConvLSTM_model
hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=deepconvlstm_min_conv_layers,
max_conv_layers=deepconvlstm_max_conv_layers,
min_conv_filters=deepconvlstm_min_conv_filters,
max_conv_filters=deepconvlstm_max_conv_filters,
min_lstm_layers=deepconvlstm_min_lstm_layers,
max_lstm_layers=deepconvlstm_max_lstm_layers,
min_lstm_dims=deepconvlstm_min_lstm_dims,
max_lstm_dims=deepconvlstm_max_lstm_dims,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
models.append(
(generate_model(x_shape, number_of_classes, metrics=metrics, **hyperparameters),
hyperparameters, current_model_type))
return models
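# A hedged usage sketch (illustrative only; shapes and counts are assumptions):
# sample three random CNN candidates for 100 series of 512 timesteps x 3
# channels with 5 classes, then inspect the drawn hyperparameters.
# models = generate_models((100, 512, 3), number_of_classes=5,
#                          number_of_models=3, model_type='CNN')
# for model, hyperparameters, model_type in models:
#     print(model_type, hyperparameters['learning_rate'], hyperparameters['filters'])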
def generate_DeepConvLSTM_model(
x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
regularization_rate=0.01, metrics=['accuracy']):
"""
Generate a model with convolution and LSTM layers.
See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
lstm_dims : list of ints
number of hidden nodes for each LSTM layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
"""
dim_length = x_shape[1] # number of samples in a time series
dim_channels = x_shape[2] # number of channels
output_dim = class_number # number of classes
weightinit = 'lecun_uniform' # weight initialization
model = Sequential() # initialize model
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
# reshape a 2 dimensional array per file/person/object into a
# 3 dimensional array
model.add(
Reshape(target_shape=(dim_length, dim_channels, 1)))
for filt in filters:
# filt: number of filters used in a layer
# filters: vector of filt values
model.add(
Convolution2D(filt, kernel_size=(3, 1), padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
# reshape 3 dimensional array back into a 2 dimensional array,
# but now with more dept as we have the the filters for each channel
model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
    for lstm_dim in lstm_dims:
        # on HPC (no GPU/CuDNN), comment out the CuDNNLSTM line below and
        # use the plain LSTM instead:
        # model.add(LSTM(units=lstm_dim, return_sequences=True,
        #                activation='tanh'))
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
model.add(Dropout(0.5)) # dropout before the dense layer
# # set up final dense layer such that every timestamp is given one
# # classification
# model.add(
# TimeDistributed(
# Dense(units=output_dim, kernel_regularizer=l2(regularization_rate))))
# model.add(Activation("softmax"))
# # Final classification layer - per timestep
# model.add(Lambda(lambda x: x[:, -1, :], output_shape=[output_dim]))
# Pool output of all timesteps and perform classification using pooled output
model.add(GlobalAveragePooling1D())
model.add(Dense(units=output_dim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax")) # Final classification layer
# if class_number == 2:
# loss = 'binary_crossentropy'
# else:
# loss = 'categorical_crossentropy'
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
learning_rate=0.01, regularization_rate=0.01,
metrics=['accuracy']):
"""
Generate a convolutional neural network (CNN) model.
The compiled Keras model is returned.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
fc_hidden_nodes : int
number of hidden nodes for the hidden dense layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
"""
dim_length = x_shape[1] # number of samples in a time series
dim_channels = x_shape[2] # number of channels
outputdim = class_number # number of classes
weightinit = 'lecun_uniform' # weight initialization
model = Sequential()
model.add(
BatchNormalization(
input_shape=(
dim_length,
dim_channels)))
for filter_number in filters:
model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes,
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit)) # Fully connected layer
model.add(Activation('relu')) # Relu activation
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax")) # Final classification layer
# if class_number == 2:
# loss = 'binary_crossentropy'
# else:
# loss = 'categorical_crossentropy'
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
min_filters=10, max_filters=100,
min_fc_nodes=10, max_fc_nodes=2000,
low_lr=1, high_lr=4, low_reg=1,
high_reg=4):
""" Generate a hyperparameter set that define a CNN model.
Parameters
----------
    min_layers : int
        minimum number of Conv layers
    max_layers : int
        maximum number of Conv layers
min_filters : int
minimum number of filters per Conv layer
max_filters : int
maximum number of filters per Conv layer
min_fc_nodes : int
minimum number of hidden nodes per Dense layer
max_fc_nodes : int
maximum number of hidden nodes per Dense layer
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters : dict
parameters for a CNN model
"""
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_layers = np.random.randint(min_layers, max_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_filters, max_filters + 1, number_of_layers)
hyperparameters['fc_hidden_nodes'] = np.random.randint(
min_fc_nodes, max_fc_nodes + 1)
return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=1, max_conv_layers=10,
min_conv_filters=10, max_conv_filters=100,
min_lstm_layers=1, max_lstm_layers=5,
min_lstm_dims=10, max_lstm_dims=100,
low_lr=1, high_lr=4, low_reg=1, high_reg=4):
""" Generate a hyperparameter set that defines a DeepConvLSTM model.
Parameters
----------
min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
    min_lstm_layers : int
        minimum number of LSTM layers in DeepConvLSTM model
    max_lstm_layers : int
        maximum number of LSTM layers in DeepConvLSTM model
min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters: dict
hyperparameters for a DeepConvLSTM model
"""
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_conv_layers = np.random.randint(
min_conv_layers, max_conv_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_conv_filters, max_conv_filters + 1, number_of_conv_layers).tolist()
number_of_lstm_layers = np.random.randint(
min_lstm_layers, max_lstm_layers + 1)
hyperparameters['lstm_dims'] = np.random.randint(
min_lstm_dims, max_lstm_dims + 1, number_of_lstm_layers).tolist()
return hyperparameters
def generate_base_hyper_parameter_set(
low_lr=1,
high_lr=4,
low_reg=1,
high_reg=4):
""" Generate a base set of hyperparameters that are necessary for any
model, but sufficient for none.
Parameters
----------
    low_lr : float
        minimum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
    high_lr : float
        maximum of log range for learning rate: learning rate is sampled
        between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
hyperparameters : dict
        base hyperparameters
"""
hyperparameters = {}
hyperparameters['learning_rate'] = get_learning_rate(low_lr, high_lr)
hyperparameters['regularization_rate'] = get_regularization(
low_reg, high_reg)
return hyperparameters
def get_learning_rate(low=1, high=4):
""" Return random learning rate 10^-n where n is sampled uniformly between
low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
learning_rate : float
learning rate
"""
    result = 0.001  # fixed learning rate for Adam; sampled 10 ** (-np.random.uniform(low, high)) is disabled
return result
def get_regularization(low=1, high=4):
""" Return random regularization rate 10^-n where n is sampled uniformly
between low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
regularization_rate : float
regularization rate
"""
return 10 ** (-np.random.uniform(low, high))
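# A hedged illustration (not in the original file): get_regularization draws
# log-uniformly, i.e. 10**-n with n ~ U(low, high), so low=1, high=4 yields
# rates in [1e-4, 1e-1] with equal probability mass per decade:
# rates = [get_regularization(1, 4) for _ in range(1000)]
# assert all(1e-4 <= r <= 1e-1 for r in rates)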
| 38.618143
| 92
| 0.667632
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Lambda, \
Convolution2D, Flatten, \
Reshape, LSTM, Dropout, TimeDistributed, BatchNormalization, \
GlobalAveragePooling1D, Bidirectional
from keras.layers import CuDNNLSTM
from keras.regularizers import l2
from keras.optimizers import Adam
import numpy as np
def generate_models(
x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'],
model_type=None,
cnn_min_layers=5, cnn_max_layers=10,
cnn_min_filters=25, cnn_max_filters=100,
cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000,
deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7,
deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100,
deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3,
deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500,
low_lr=1, high_lr=4, low_reg=1, high_reg=3
):
models = []
for _ in range(0, number_of_models):
if model_type is None:
current_model_type = 'CNN' if np.random.random(
) < 0.5 else 'DeepConvLSTM'
else:
current_model_type = model_type
generate_model = None
if current_model_type == 'CNN':
generate_model = generate_CNN_model
hyperparameters = generate_CNN_hyperparameter_set(
min_layers=cnn_min_layers, max_layers=cnn_max_layers,
min_filters=cnn_min_filters, max_filters=cnn_max_filters,
min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
if current_model_type == 'DeepConvLSTM':
generate_model = generate_DeepConvLSTM_model
hyperparameters = generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=deepconvlstm_min_conv_layers,
max_conv_layers=deepconvlstm_max_conv_layers,
min_conv_filters=deepconvlstm_min_conv_filters,
max_conv_filters=deepconvlstm_max_conv_filters,
min_lstm_layers=deepconvlstm_min_lstm_layers,
max_lstm_layers=deepconvlstm_max_lstm_layers,
min_lstm_dims=deepconvlstm_min_lstm_dims,
max_lstm_dims=deepconvlstm_max_lstm_dims,
low_lr=low_lr, high_lr=high_lr, low_reg=low_reg,
high_reg=high_reg)
models.append(
(generate_model(x_shape, number_of_classes, metrics=metrics, **hyperparameters),
hyperparameters, current_model_type))
return models
def generate_DeepConvLSTM_model(
x_shape, class_number, filters, lstm_dims, learning_rate=0.01,
regularization_rate=0.01, metrics=['accuracy']):
dim_length = x_shape[1]
dim_channels = x_shape[2]
output_dim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
model.add(
Reshape(target_shape=(dim_length, dim_channels, 1)))
for filt in filters:
model.add(
Convolution2D(filt, kernel_size=(3, 1), padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Reshape(target_shape=(dim_length, filters[-1] * dim_channels)))
for lstm_dim in lstm_dims:
        model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
    model.add(Dropout(0.5))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(units=output_dim, kernel_initializer=weightinit))
    model.add(BatchNormalization())
    model.add(Activation("softmax"))
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes,
learning_rate=0.01, regularization_rate=0.01,
metrics=['accuracy']):
dim_length = x_shape[1]
dim_channels = x_shape[2]
outputdim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(
BatchNormalization(
input_shape=(
dim_length,
dim_channels)))
for filter_number in filters:
model.add(Convolution1D(filter_number, kernel_size=3, padding='same',
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes,
kernel_regularizer=l2(regularization_rate),
kernel_initializer=weightinit))
model.add(Activation('relu'))
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation("softmax"))
loss = 'categorical_crossentropy'
model.compile(loss=loss,
optimizer=Adam(lr=learning_rate),
metrics=metrics)
return model
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10,
min_filters=10, max_filters=100,
min_fc_nodes=10, max_fc_nodes=2000,
low_lr=1, high_lr=4, low_reg=1,
high_reg=4):
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_layers = np.random.randint(min_layers, max_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_filters, max_filters + 1, number_of_layers)
hyperparameters['fc_hidden_nodes'] = np.random.randint(
min_fc_nodes, max_fc_nodes + 1)
return hyperparameters
def generate_DeepConvLSTM_hyperparameter_set(
min_conv_layers=1, max_conv_layers=10,
min_conv_filters=10, max_conv_filters=100,
min_lstm_layers=1, max_lstm_layers=5,
min_lstm_dims=10, max_lstm_dims=100,
low_lr=1, high_lr=4, low_reg=1, high_reg=4):
hyperparameters = generate_base_hyper_parameter_set(
low_lr, high_lr, low_reg, high_reg)
number_of_conv_layers = np.random.randint(
min_conv_layers, max_conv_layers + 1)
hyperparameters['filters'] = np.random.randint(
min_conv_filters, max_conv_filters + 1, number_of_conv_layers).tolist()
number_of_lstm_layers = np.random.randint(
min_lstm_layers, max_lstm_layers + 1)
hyperparameters['lstm_dims'] = np.random.randint(
min_lstm_dims, max_lstm_dims + 1, number_of_lstm_layers).tolist()
return hyperparameters
def generate_base_hyper_parameter_set(
low_lr=1,
high_lr=4,
low_reg=1,
high_reg=4):
hyperparameters = {}
hyperparameters['learning_rate'] = get_learning_rate(low_lr, high_lr)
hyperparameters['regularization_rate'] = get_regularization(
low_reg, high_reg)
return hyperparameters
def get_learning_rate(low=1, high=4):
    result = 0.001
    return result
def get_regularization(low=1, high=4):
    return 10 ** (-np.random.uniform(low, high))
| true
| true
|
790705e67f37b4b782a257841c59890a230aee52
| 14,405
|
py
|
Python
|
pulp/apis/gurobi_api.py
|
smipperat/pulp
|
b13f6e75bd0d0132180d0ee9333b2351c8327d66
|
[
"MIT"
] | 1
|
2022-01-19T04:02:46.000Z
|
2022-01-19T04:02:46.000Z
|
pulp/apis/gurobi_api.py
|
smipperat/pulp
|
b13f6e75bd0d0132180d0ee9333b2351c8327d66
|
[
"MIT"
] | 1
|
2021-11-19T07:21:48.000Z
|
2021-11-19T07:21:48.000Z
|
pulp/apis/gurobi_api.py
|
smipperat/pulp
|
b13f6e75bd0d0132180d0ee9333b2351c8327d66
|
[
"MIT"
] | 1
|
2022-01-14T17:15:38.000Z
|
2022-01-14T17:15:38.000Z
|
# PuLP : Python LP Modeler
# Version 1.4.2
# Copyright (c) 2002-2005, Jean-Sebastien Roy (js@jeannot.org)
# Modifications Copyright (c) 2007- Stuart Anthony Mitchell (s.mitchell@auckland.ac.nz)
# $Id:solvers.py 1791 2008-04-23 22:54:34Z smit023 $
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
from .core import LpSolver_CMD, LpSolver, subprocess, PulpSolverError, clock, log
from .core import gurobi_path
import os
from uuid import uuid4
import sys
from .. import constants
import warnings
# to import the gurobipy name into the module scope
gurobipy = None
class GUROBI(LpSolver):
"""
The Gurobi LP/MIP solver (via its python interface)
The Gurobi variables are available (after a solve) in var.solverVar
    Constraints are in constraint.solverConstraint
and the Model is in prob.solverModel
"""
try:
sys.path.append(gurobi_path)
# to import the name into the module scope
global gurobipy
import gurobipy
    except:  # FIXME: catch-all because gurobipy raises its own exception
             # type (not ImportError) on failed imports
def available(self):
"""True if the solver is available"""
return False
def actualSolve(self, lp, callback = None):
"""Solve a well formulated lp problem"""
raise PulpSolverError("GUROBI: Not Available")
else:
def __init__(self,
mip = True,
msg = True,
timeLimit = None,
epgap = None,
**solverParams):
"""
Initializes the Gurobi solver.
@param mip: if False the solver will solve a MIP as an LP
@param msg: displays information from the solver to stdout
@param timeLimit: sets the maximum time for solution
@param epgap: sets the integer bound gap
"""
LpSolver.__init__(self, mip, msg)
self.timeLimit = timeLimit
self.epgap = epgap
#set the output of gurobi
if not self.msg:
gurobipy.setParam("OutputFlag", 0)
#set the gurobi parameter values
for key,value in solverParams.items():
gurobipy.setParam(key, value)
def findSolutionValues(self, lp):
model = lp.solverModel
solutionStatus = model.Status
GRB = gurobipy.GRB
# TODO: check status for Integer Feasible
gurobiLpStatus = {GRB.OPTIMAL: constants.LpStatusOptimal,
GRB.INFEASIBLE: constants.LpStatusInfeasible,
GRB.INF_OR_UNBD: constants.LpStatusInfeasible,
GRB.UNBOUNDED: constants.LpStatusUnbounded,
GRB.ITERATION_LIMIT: constants.LpStatusNotSolved,
GRB.NODE_LIMIT: constants.LpStatusNotSolved,
GRB.TIME_LIMIT: constants.LpStatusNotSolved,
GRB.SOLUTION_LIMIT: constants.LpStatusNotSolved,
GRB.INTERRUPTED: constants.LpStatusNotSolved,
GRB.NUMERIC: constants.LpStatusNotSolved,
}
#populate pulp solution values
try:
for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.X, model.getVars())):
var.varValue = value
except (gurobipy.GurobiError, AttributeError):
pass
try:
for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.RC, model.getVars())):
var.dj = value
except (gurobipy.GurobiError, AttributeError):
pass
#put pi and slack variables against the constraints
try:
for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Pi, model.getConstrs())):
constr.pi = value
except (gurobipy.GurobiError, AttributeError):
pass
try:
for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Slack, model.getConstrs())):
constr.slack = value
except (gurobipy.GurobiError, AttributeError):
pass
if self.msg:
print("Gurobi status=", solutionStatus)
lp.resolveOK = True
for var in lp.variables():
var.isModified = False
status = gurobiLpStatus.get(solutionStatus, constants.LpStatusUndefined)
lp.assignStatus(status)
return status
def available(self):
"""True if the solver is available"""
return True
def callSolver(self, lp, callback = None):
"""Solves the problem with gurobi
"""
#solve the problem
self.solveTime = -clock()
lp.solverModel.optimize(callback = callback)
self.solveTime += clock()
def buildSolverModel(self, lp):
"""
Takes the pulp lp model and translates it into a gurobi model
"""
log.debug("create the gurobi model")
lp.solverModel = gurobipy.Model(lp.name)
log.debug("set the sense of the problem")
if lp.sense == constants.LpMaximize:
lp.solverModel.setAttr("ModelSense", -1)
if self.timeLimit:
lp.solverModel.setParam("TimeLimit", self.timeLimit)
if self.epgap:
lp.solverModel.setParam("MIPGap", self.epgap)
log.debug("add the variables to the problem")
for var in lp.variables():
lowBound = var.lowBound
if lowBound is None:
lowBound = -gurobipy.GRB.INFINITY
upBound = var.upBound
if upBound is None:
upBound = gurobipy.GRB.INFINITY
obj = lp.objective.get(var, 0.0)
varType = gurobipy.GRB.CONTINUOUS
if var.cat == constants.LpInteger and self.mip:
varType = gurobipy.GRB.INTEGER
var.solverVar = lp.solverModel.addVar(lowBound, upBound,
vtype = varType,
obj = obj, name = var.name)
lp.solverModel.update()
log.debug("add the Constraints to the problem")
for name,constraint in lp.constraints.items():
#build the expression
expr = gurobipy.LinExpr(list(constraint.values()),
[v.solverVar for v in constraint.keys()])
if constraint.sense == constants.LpConstraintLE:
relation = gurobipy.GRB.LESS_EQUAL
elif constraint.sense == constants.LpConstraintGE:
relation = gurobipy.GRB.GREATER_EQUAL
elif constraint.sense == constants.LpConstraintEQ:
relation = gurobipy.GRB.EQUAL
else:
raise PulpSolverError('Detected an invalid constraint type')
constraint.solverConstraint = lp.solverModel.addConstr(expr,
relation, -constraint.constant, name)
lp.solverModel.update()
def actualSolve(self, lp, callback = None):
"""
Solve a well formulated lp problem
creates a gurobi model, variables and constraints and attaches
them to the lp model which it then solves
"""
self.buildSolverModel(lp)
#set the initial solution
log.debug("Solve the Model using gurobi")
self.callSolver(lp, callback = callback)
#get the solution information
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus
def actualResolve(self, lp, callback = None):
"""
Solve a well formulated lp problem
uses the old solver and modifies the rhs of the modified constraints
"""
log.debug("Resolve the Model using gurobi")
for constraint in lp.constraints.values():
if constraint.modified:
constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS,
-constraint.constant)
lp.solverModel.update()
self.callSolver(lp, callback = callback)
#get the solution information
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus
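# A hedged usage sketch (illustrative, not shipped with the module): driving
# the GUROBI wrapper from PuLP; problem and variable names are assumptions.
# from pulp import LpProblem, LpVariable, LpMaximize
# prob = LpProblem("demo", LpMaximize)
# x = LpVariable("x", 0, 10); y = LpVariable("y", 0, 10)
# prob += x + 2 * y          # objective
# prob += x + y <= 12        # constraint
# prob.solve(GUROBI(msg=False, timeLimit=60, epgap=0.01))
# x.solverVar and prob.solverModel then expose the raw gurobipy objects.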
class GUROBI_CMD(LpSolver_CMD):
"""The GUROBI_CMD solver"""
def defaultPath(self):
return self.executableExtension("gurobi_cl")
def available(self):
"""True if the solver is available"""
return self.executable(self.path)
def actualSolve(self, lp):
"""Solve a well formulated lp problem"""
# TODO: workaround for python not reading LD_LIBRARY_PATH
# in my version of ubuntu
if 'GUROBI_HOME' in os.environ:
if 'LD_LIBRARY_PATH' not in os.environ:
os.environ['LD_LIBRARY_PATH'] = ""
os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['GUROBI_HOME'] + "/lib"
if not self.executable(self.path):
raise PulpSolverError("PuLP: cannot execute "+self.path)
if not self.keepFiles:
uuid = uuid4().hex
tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid)
tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid)
tmpMst = os.path.join(self.tmpDir, "%s-pulp.mst" % uuid)
else:
tmpLp = lp.name+"-pulp.lp"
tmpSol = lp.name+"-pulp.sol"
tmpMst = lp.name + "-pulp.mst"
vs = lp.writeLP(tmpLp, writeSOS = 1)
        try: os.remove(tmpSol)
        except OSError: pass
cmd = self.path
cmd += ' ' + ' '.join(['%s=%s' % (key, value)
for key, value in self.options])
cmd += ' ResultFile=%s' % tmpSol
if self.mip_start:
self.writesol(filename=tmpMst, vs=vs)
cmd += ' InputFile=%s' % tmpMst
if lp.isMIP():
if not self.mip:
warnings.warn('GUROBI_CMD does not allow a problem to be relaxed')
cmd += ' %s' % tmpLp
if self.msg:
pipe = None
else:
pipe = open(os.devnull, 'w')
return_code = subprocess.call(cmd.split(), stdout = pipe, stderr = pipe)
# Close the pipe now if we used it.
if pipe is not None:
pipe.close()
if return_code != 0:
raise PulpSolverError("PuLP: Error while trying to execute "+self.path)
if not os.path.exists(tmpSol):
            warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions')
status = constants.LpStatusNotSolved
values = reducedCosts = shadowPrices = slacks = None
else:
status, values, reducedCosts, shadowPrices, slacks = self.readsol(tmpSol)
if not self.keepFiles:
for f in [tmpSol, tmpMst, tmpLp, "gurobi.log"]:
                try: os.remove(f)
                except OSError: pass
if status != constants.LpStatusInfeasible:
lp.assignVarsVals(values)
lp.assignVarsDj(reducedCosts)
lp.assignConsPi(shadowPrices)
lp.assignConsSlack(slacks)
lp.assignStatus(status)
return status
def readsol(self, filename):
"""Read a Gurobi solution file"""
with open(filename) as my_file:
try:
next(my_file) # skip the objective value
except StopIteration:
# Empty file not solved
                warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions')
status = constants.LpStatusNotSolved
return status, {}, {}, {}, {}
#We have no idea what the status is assume optimal
# TODO: check status for Integer Feasible
status = constants.LpStatusOptimal
            shadowPrices = {}
            slacks = {}
values = {}
reducedCosts = {}
for line in my_file:
if line[0] != '#': #skip comments
name, value = line.split()
values[name] = float(value)
return status, values, reducedCosts, shadowPrices, slacks
def writesol(self, filename, vs):
"""Writes a GUROBI solution file"""
values = [(v.name, v.value()) for v in vs if v.value() is not None]
rows = []
for name, value in values:
rows.append('{} {}'.format(name, value))
with open(filename, 'w') as f:
f.write('\n'.join(rows))
return True
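# ---------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the solver API):
# how the GUROBI / GUROBI_CMD classes above are typically driven through
# PuLP from a user script.  The problem and the numbers below are made up.
# ---------------------------------------------------------------------
if __name__ == '__main__':
    import pulp
    prob = pulp.LpProblem("demo", pulp.LpMaximize)
    x = pulp.LpVariable("x", lowBound=0)
    y = pulp.LpVariable("y", lowBound=0, cat="Integer")
    prob += 3 * x + 2 * y              # objective
    prob += x + y <= 4                 # constraint
    # GUROBI() talks to gurobipy directly; GUROBI_CMD() shells out to
    # gurobi_cl and parses the .sol file with readsol() above.
    prob.solve(pulp.GUROBI_CMD(msg=True))
    print(pulp.LpStatus[prob.status], pulp.value(prob.objective))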
| 41.512968
| 112
| 0.567581
|
from .core import LpSolver_CMD, LpSolver, subprocess, PulpSolverError, clock, log
from .core import gurobi_path
import os
from uuid import uuid4
import sys
from .. import constants
import warnings
# to import the gurobipy name into the module scope
gurobipy = None
class GUROBI(LpSolver):
try:
sys.path.append(gurobi_path)
# to import the name into the module scope
global gurobipy
import gurobipy
except: # FIXME: Bug because gurobi returns
#a gurobi exception on failed imports
def available(self):
"""True if the solver is available"""
return False
def actualSolve(self, lp, callback = None):
"""Solve a well formulated lp problem"""
raise PulpSolverError("GUROBI: Not Available")
else:
def __init__(self,
mip = True,
msg = True,
timeLimit = None,
epgap = None,
**solverParams):
"""
Initializes the Gurobi solver.
@param mip: if False the solver will solve a MIP as an LP
@param msg: displays information from the solver to stdout
@param timeLimit: sets the maximum time for solution
@param epgap: sets the integer bound gap
"""
LpSolver.__init__(self, mip, msg)
self.timeLimit = timeLimit
self.epgap = epgap
#set the output of gurobi
if not self.msg:
gurobipy.setParam("OutputFlag", 0)
#set the gurobi parameter values
for key,value in solverParams.items():
gurobipy.setParam(key, value)
def findSolutionValues(self, lp):
model = lp.solverModel
solutionStatus = model.Status
GRB = gurobipy.GRB
# TODO: check status for Integer Feasible
gurobiLpStatus = {GRB.OPTIMAL: constants.LpStatusOptimal,
GRB.INFEASIBLE: constants.LpStatusInfeasible,
GRB.INF_OR_UNBD: constants.LpStatusInfeasible,
GRB.UNBOUNDED: constants.LpStatusUnbounded,
GRB.ITERATION_LIMIT: constants.LpStatusNotSolved,
GRB.NODE_LIMIT: constants.LpStatusNotSolved,
GRB.TIME_LIMIT: constants.LpStatusNotSolved,
GRB.SOLUTION_LIMIT: constants.LpStatusNotSolved,
GRB.INTERRUPTED: constants.LpStatusNotSolved,
GRB.NUMERIC: constants.LpStatusNotSolved,
}
#populate pulp solution values
try:
for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.X, model.getVars())):
var.varValue = value
except (gurobipy.GurobiError, AttributeError):
pass
try:
for var, value in zip(lp.variables(), model.getAttr(GRB.Attr.RC, model.getVars())):
var.dj = value
except (gurobipy.GurobiError, AttributeError):
pass
#put pi and slack variables against the constraints
try:
for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Pi, model.getConstrs())):
constr.pi = value
except (gurobipy.GurobiError, AttributeError):
pass
try:
for constr, value in zip(lp.constraints.values(), model.getAttr(GRB.Slack, model.getConstrs())):
constr.slack = value
except (gurobipy.GurobiError, AttributeError):
pass
if self.msg:
print("Gurobi status=", solutionStatus)
lp.resolveOK = True
for var in lp.variables():
var.isModified = False
status = gurobiLpStatus.get(solutionStatus, constants.LpStatusUndefined)
lp.assignStatus(status)
return status
def available(self):
"""True if the solver is available"""
return True
def callSolver(self, lp, callback = None):
"""Solves the problem with gurobi
"""
#solve the problem
self.solveTime = -clock()
lp.solverModel.optimize(callback = callback)
self.solveTime += clock()
def buildSolverModel(self, lp):
"""
Takes the pulp lp model and translates it into a gurobi model
"""
log.debug("create the gurobi model")
lp.solverModel = gurobipy.Model(lp.name)
log.debug("set the sense of the problem")
if lp.sense == constants.LpMaximize:
lp.solverModel.setAttr("ModelSense", -1)
if self.timeLimit:
lp.solverModel.setParam("TimeLimit", self.timeLimit)
if self.epgap:
lp.solverModel.setParam("MIPGap", self.epgap)
log.debug("add the variables to the problem")
for var in lp.variables():
lowBound = var.lowBound
if lowBound is None:
lowBound = -gurobipy.GRB.INFINITY
upBound = var.upBound
if upBound is None:
upBound = gurobipy.GRB.INFINITY
obj = lp.objective.get(var, 0.0)
varType = gurobipy.GRB.CONTINUOUS
if var.cat == constants.LpInteger and self.mip:
varType = gurobipy.GRB.INTEGER
var.solverVar = lp.solverModel.addVar(lowBound, upBound,
vtype = varType,
obj = obj, name = var.name)
lp.solverModel.update()
log.debug("add the Constraints to the problem")
for name,constraint in lp.constraints.items():
#build the expression
expr = gurobipy.LinExpr(list(constraint.values()),
[v.solverVar for v in constraint.keys()])
if constraint.sense == constants.LpConstraintLE:
relation = gurobipy.GRB.LESS_EQUAL
elif constraint.sense == constants.LpConstraintGE:
relation = gurobipy.GRB.GREATER_EQUAL
elif constraint.sense == constants.LpConstraintEQ:
relation = gurobipy.GRB.EQUAL
else:
raise PulpSolverError('Detected an invalid constraint type')
constraint.solverConstraint = lp.solverModel.addConstr(expr,
relation, -constraint.constant, name)
lp.solverModel.update()
def actualSolve(self, lp, callback = None):
"""
Solve a well formulated lp problem
creates a gurobi model, variables and constraints and attaches
them to the lp model which it then solves
"""
self.buildSolverModel(lp)
#set the initial solution
log.debug("Solve the Model using gurobi")
self.callSolver(lp, callback = callback)
#get the solution information
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus
def actualResolve(self, lp, callback = None):
"""
Solve a well formulated lp problem
uses the old solver and modifies the rhs of the modified constraints
"""
log.debug("Resolve the Model using gurobi")
for constraint in lp.constraints.values():
if constraint.modified:
constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS,
-constraint.constant)
lp.solverModel.update()
self.callSolver(lp, callback = callback)
#get the solution information
solutionStatus = self.findSolutionValues(lp)
for var in lp.variables():
var.modified = False
for constraint in lp.constraints.values():
constraint.modified = False
return solutionStatus
class GUROBI_CMD(LpSolver_CMD):
def defaultPath(self):
return self.executableExtension("gurobi_cl")
def available(self):
return self.executable(self.path)
def actualSolve(self, lp):
# TODO: workaround for python not reading LD_LIBRARY_PATH
# in my version of ubuntu
if 'GUROBI_HOME' in os.environ:
if 'LD_LIBRARY_PATH' not in os.environ:
os.environ['LD_LIBRARY_PATH'] = ""
os.environ['LD_LIBRARY_PATH'] += ':' + os.environ['GUROBI_HOME'] + "/lib"
if not self.executable(self.path):
raise PulpSolverError("PuLP: cannot execute "+self.path)
if not self.keepFiles:
uuid = uuid4().hex
tmpLp = os.path.join(self.tmpDir, "%s-pulp.lp" % uuid)
tmpSol = os.path.join(self.tmpDir, "%s-pulp.sol" % uuid)
tmpMst = os.path.join(self.tmpDir, "%s-pulp.mst" % uuid)
else:
tmpLp = lp.name+"-pulp.lp"
tmpSol = lp.name+"-pulp.sol"
tmpMst = lp.name + "-pulp.mst"
vs = lp.writeLP(tmpLp, writeSOS = 1)
        try: os.remove(tmpSol)
        except OSError: pass
cmd = self.path
cmd += ' ' + ' '.join(['%s=%s' % (key, value)
for key, value in self.options])
cmd += ' ResultFile=%s' % tmpSol
if self.mip_start:
self.writesol(filename=tmpMst, vs=vs)
cmd += ' InputFile=%s' % tmpMst
if lp.isMIP():
if not self.mip:
warnings.warn('GUROBI_CMD does not allow a problem to be relaxed')
cmd += ' %s' % tmpLp
if self.msg:
pipe = None
else:
pipe = open(os.devnull, 'w')
return_code = subprocess.call(cmd.split(), stdout = pipe, stderr = pipe)
# Close the pipe now if we used it.
if pipe is not None:
pipe.close()
if return_code != 0:
raise PulpSolverError("PuLP: Error while trying to execute "+self.path)
if not os.path.exists(tmpSol):
            warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions')
status = constants.LpStatusNotSolved
values = reducedCosts = shadowPrices = slacks = None
else:
status, values, reducedCosts, shadowPrices, slacks = self.readsol(tmpSol)
if not self.keepFiles:
for f in [tmpSol, tmpMst, tmpLp, "gurobi.log"]:
                try: os.remove(f)
                except OSError: pass
if status != constants.LpStatusInfeasible:
lp.assignVarsVals(values)
lp.assignVarsDj(reducedCosts)
lp.assignConsPi(shadowPrices)
lp.assignConsSlack(slacks)
lp.assignStatus(status)
return status
def readsol(self, filename):
with open(filename) as my_file:
try:
next(my_file) # skip the objective value
except StopIteration:
# Empty file not solved
                warnings.warn('GUROBI_CMD does not provide good solution status for non-optimal solutions')
status = constants.LpStatusNotSolved
return status, {}, {}, {}, {}
#We have no idea what the status is assume optimal
# TODO: check status for Integer Feasible
status = constants.LpStatusOptimal
            shadowPrices = {}
            slacks = {}
values = {}
reducedCosts = {}
for line in my_file:
if line[0] != '#': #skip comments
name, value = line.split()
values[name] = float(value)
return status, values, reducedCosts, shadowPrices, slacks
def writesol(self, filename, vs):
values = [(v.name, v.value()) for v in vs if v.value() is not None]
rows = []
for name, value in values:
rows.append('{} {}'.format(name, value))
with open(filename, 'w') as f:
f.write('\n'.join(rows))
return True
| true
| true
|
79070661e6fe22285dee8d3984c5e77158a8c8d2
| 5,093
|
py
|
Python
|
spider.py
|
edroot/busgov_spider
|
8247da7c98c1fab20a29369d274ff4d87f70a5d6
|
[
"Apache-2.0"
] | null | null | null |
spider.py
|
edroot/busgov_spider
|
8247da7c98c1fab20a29369d274ff4d87f70a5d6
|
[
"Apache-2.0"
] | null | null | null |
spider.py
|
edroot/busgov_spider
|
8247da7c98c1fab20a29369d274ff4d87f70a5d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =========================================================================
# Author Eduard Kabrinskyi <soulroot@gmail.com> Skype: soulroot@hotmail.com
# =========================================================================
# =========================
# Main APP definitions
# =========================
import logging
import os
import requests
from lxml import html
import time
from random import choice
# =========================
# Database APP definitions
# =========================
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.orm import Session
from sqlalchemy import func
# =========================
# Set Logging
# =========================
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(module)s.%(funcName)s - %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.disable(logging.NOTSET)
logging.info('Loading %s', __name__)
# =========================
# Database Class
# =========================
Base = declarative_base()
class OrgTable(Base):
__tablename__ = 'organization'
id = Column(Integer, primary_key=True)
name = Column(String(2000))
inn = Column(Integer)
address = Column(String(2000))
def __init__(self, name, inn, address):
self.name = name
self.inn = inn
self.address = address
def __repr__(self):
return "<Data %s, %s>" % (self.name, self.innm, self.address)
# =========================
# Spider Class
# =========================
class Busgov(object):
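    """Crawler for bus.gov.ru organization listings.

    Scraped rows (name, INN, address) are persisted to a local SQLite
    database via SQLAlchemy; the current page number is checkpointed to
    page.txt so an interrupted run can resume.
    """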
def __init__(self):
basename = 'database.db'
self.engine = create_engine("sqlite:///%s" % basename, echo=False)
if not os.path.exists(basename):
Base.metadata.create_all(self.engine)
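        # Resume from the last crawled page recorded in page.txt.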
f = open('page.txt', 'r')
self.start = int(f.read())
f.close()
self.last_page = set()
def get_count_items(self):
self.session = Session(bind=self.engine)
items = self.session.query(func.count(OrgTable.id)).scalar()
self.session.close()
return logging.info('Now Database items count: %s' %items)
def get_pages(self, stop):
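        """Crawl listing pages from self.start up to stop.

        Each result-table row is parsed into name/INN/address and stored
        via processed(); the page counter is written back to page.txt
        after every page.
        """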
try:
for page in range(self.start, stop):
logging.info('Crawl page: %s' % (page))
page_text = get_page('http://bus.gov.ru/public/agency/choose.html?d-442831-p=' + str(page))
tree = html.fromstring(page_text)
org_list = tree.xpath('//table[@id="resultTable"]/tbody/tr[*]')
x=1
for org in org_list:
name = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[2]/text()')[0].strip('\n ')
inn = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[3]/text()')[0]
address = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[4]/text()')[0].strip('\n ')
item = {'name': name, 'inn': inn, 'address': address}
x+=1
self.processed(item=item, page=page)
f = open('page.txt', 'w')
f.write(str(page))
f.close()
else:
                raise RuntimeError('Stop Crawl last page: %s' % page)
except Exception as e:
            logging.error(e)
def processed(self, item, page):
self.session = Session(bind=self.engine)
#print item['name']
ot = OrgTable(item['name'], item['inn'], item['address'])
self.session.add(ot)
self.session.commit()
self.session.close()
# =========================
# Helper functions
# =========================
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
def get_request(page,proxies):
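    """Fetch `page` through the given proxy.

    On any request failure a stub object with status_code = None is
    returned so that get_page() can rotate to another proxy.
    """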
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
r = requests.get(page, headers=headers, proxies=proxies, timeout=10.0)
return r
    except requests.exceptions.RequestException:
        # On failure, return a stub whose status_code is None so that
        # get_page() can rotate to another proxy.
        class r(object):
            status_code = None
        return r
def get_page(page):
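    """Keep requesting `page` through randomly chosen proxies from
    proxy.txt until one returns HTTP 200, then return the response body.
    """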
    global proxies
    proxy_status = False
    sleep_time = 1
    while not proxy_status:
time.sleep(sleep_time)
logging.info("Set proxy: %s" %proxies["http"])
r = get_request(page=page,proxies=proxies)
if r.status_code == 200:
proxy_status = True
logging.info('Proxy UP: %s ' % proxies['http'])
else:
logging.info('Proxy DOWN: %s ' % proxies['http'])
            proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
return r.text
# =========================
# bg.get_pages(xxxx) -- total number of pages to crawl
# page.txt holds the current page to start from
# =========================
if __name__ == "__main__":
bg = Busgov()
bg.get_count_items()
bg.get_pages(22278)
| 34.412162
| 122
| 0.531317
|
import logging
import os
import requests
from lxml import html
import time
from random import choice
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.orm import Session
from sqlalchemy import func
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(module)s.%(funcName)s - %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.disable(logging.NOTSET)
logging.info('Loading %s', __name__)
Base = declarative_base()
class OrgTable(Base):
__tablename__ = 'organization'
id = Column(Integer, primary_key=True)
name = Column(String(2000))
inn = Column(Integer)
address = Column(String(2000))
def __init__(self, name, inn, address):
self.name = name
self.inn = inn
self.address = address
def __repr__(self):
return "<Data %s, %s>" % (self.name, self.innm, self.address)
class Busgov(object):
def __init__(self):
basename = 'database.db'
self.engine = create_engine("sqlite:///%s" % basename, echo=False)
if not os.path.exists(basename):
Base.metadata.create_all(self.engine)
f = open('page.txt', 'r')
self.start = int(f.read())
f.close()
self.last_page = set()
def get_count_items(self):
self.session = Session(bind=self.engine)
items = self.session.query(func.count(OrgTable.id)).scalar()
self.session.close()
return logging.info('Now Database items count: %s' %items)
def get_pages(self, stop):
try:
for page in range(self.start, stop):
logging.info('Crawl page: %s' % (page))
page_text = get_page('http://bus.gov.ru/public/agency/choose.html?d-442831-p=' + str(page))
tree = html.fromstring(page_text)
org_list = tree.xpath('//table[@id="resultTable"]/tbody/tr[*]')
x=1
for org in org_list:
name = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[2]/text()')[0].strip('\n ')
inn = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[3]/text()')[0]
address = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[4]/text()')[0].strip('\n ')
item = {'name': name, 'inn': inn, 'address': address}
x+=1
self.processed(item=item, page=page)
f = open('page.txt', 'w')
f.write(str(page))
f.close()
else:
                raise RuntimeError('Stop Crawl last page: %s' % page)
except Exception as e:
            logging.error(e)
def processed(self, item, page):
self.session = Session(bind=self.engine)
ot = OrgTable(item['name'], item['inn'], item['address'])
self.session.add(ot)
self.session.commit()
self.session.close()
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
def get_request(page,proxies):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
r = requests.get(page, headers=headers, proxies=proxies, timeout=10.0)
return r
    except requests.exceptions.RequestException:
        class r(object):
            status_code = None
        return r
def get_page(page):
    global proxies
    proxy_status = False
    sleep_time = 1
    while not proxy_status:
time.sleep(sleep_time)
logging.info("Set proxy: %s" %proxies["http"])
r = get_request(page=page,proxies=proxies)
if r.status_code == 200:
proxy_status = True
logging.info('Proxy UP: %s ' % proxies['http'])
else:
logging.info('Proxy DOWN: %s ' % proxies['http'])
            proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
return r.text
if __name__ == "__main__":
bg = Busgov()
bg.get_count_items()
bg.get_pages(22278)
| true
| true
|
7907067f5c31951f95dd837db1647147344f6e85
| 3,172
|
py
|
Python
|
core/tests.py
|
leonunesbs/aaafuria-rebon-backend
|
a969eab64b4968574f2d4ed0d746ca7cc63bf82b
|
[
"MIT"
] | 1
|
2022-02-23T01:04:51.000Z
|
2022-02-23T01:04:51.000Z
|
core/tests.py
|
leonunesbs/aaafuria-rebon-backend
|
a969eab64b4968574f2d4ed0d746ca7cc63bf82b
|
[
"MIT"
] | 65
|
2021-12-12T13:20:58.000Z
|
2022-03-29T17:03:43.000Z
|
core/tests.py
|
leonunesbs/aaafuria-rebon-backend
|
a969eab64b4968574f2d4ed0d746ca7cc63bf82b
|
[
"MIT"
] | 1
|
2022-03-06T17:50:49.000Z
|
2022-03-06T17:50:49.000Z
|
from datetime import datetime, timedelta
import requests
from decouple import config
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from .models import Socio
class ModelTest(TestCase):
def setUp(self):
Socio(
user=User.objects.create_user(
username='00000000',
password='000000'
),
nome='João de Souza',
apelido='João',
whatsapp='(86) 9 9123-4567',
cpf='068.008.773-79',
rg='123456789',
data_nascimento='2000-01-01',
data_inicio=timezone.now(),
data_fim=timezone.now() + timedelta(days=40),
is_socio=True,
stripe_customer_id='cus_00000000',).save()
def test_notificar_email(self):
socio = Socio.objects.create(
user=User.objects.create_user(
username='12345678',
password='123456',
),
nome='Fulano',
stripe_customer_id='cus_123456789',
)
notificar = socio.notificar(metodo='email', mensagem='teste')
self.assertEqual(notificar, 'Enviando email...')
def test_datetime(self):
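        # Exercises the period-end clamping arithmetic (a period ending
        # after June is pulled back to June 30 of the current year); note
        # that this block makes no assertions.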
current_period_end = datetime(
2022, 6, 30, 23, 59, 59
)
if current_period_end - datetime.now() > timedelta(days=30):
if datetime.now().month < 7:
if current_period_end.month > 6:
current_period_end = datetime(
datetime.now().year, 6, 30, 23, 59, 59
)
def test_adicionar_socio_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.data_fim - timezone.now().date() > timedelta(days=30) and socio.is_socio:
url = 'https://cheersshop.com.br/socio/adicionar'
obj = {
"nome": socio.nome,
"email": socio.email,
"telefone": socio.whatsapp,
"matricula": socio.matricula,
"observacao": "",
"cpf": socio.cpf,
"data_fim_plano": socio.data_fim,
"vendedor": "1874"
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.status_code, 200)
def test_adicionar_coupom_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.is_socio:
url = 'https://cheersshop.com.br/codigo'
obj = {
"nome": socio.cpf,
"uso": 1,
"ativo": True,
"desconto_reais": 70 if socio.is_atleta else 65,
"maximo_usuario": "1",
"quantidade": "1",
"usuario": 192061,
"vendedor": "1874",
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.json()['status'], 'Success')
| 33.041667
| 90
| 0.533733
|
from datetime import datetime, timedelta
import requests
from decouple import config
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from .models import Socio
class ModelTest(TestCase):
def setUp(self):
Socio(
user=User.objects.create_user(
username='00000000',
password='000000'
),
nome='João de Souza',
apelido='João',
whatsapp='(86) 9 9123-4567',
cpf='068.008.773-79',
rg='123456789',
data_nascimento='2000-01-01',
data_inicio=timezone.now(),
data_fim=timezone.now() + timedelta(days=40),
is_socio=True,
stripe_customer_id='cus_00000000',).save()
def test_notificar_email(self):
socio = Socio.objects.create(
user=User.objects.create_user(
username='12345678',
password='123456',
),
nome='Fulano',
stripe_customer_id='cus_123456789',
)
notificar = socio.notificar(metodo='email', mensagem='teste')
self.assertEqual(notificar, 'Enviando email...')
def test_datetime(self):
current_period_end = datetime(
2022, 6, 30, 23, 59, 59
)
if current_period_end - datetime.now() > timedelta(days=30):
if datetime.now().month < 7:
if current_period_end.month > 6:
current_period_end = datetime(
datetime.now().year, 6, 30, 23, 59, 59
)
def test_adicionar_socio_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.data_fim - timezone.now().date() > timedelta(days=30) and socio.is_socio:
url = 'https://cheersshop.com.br/socio/adicionar'
obj = {
"nome": socio.nome,
"email": socio.email,
"telefone": socio.whatsapp,
"matricula": socio.matricula,
"observacao": "",
"cpf": socio.cpf,
"data_fim_plano": socio.data_fim,
"vendedor": "1874"
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.status_code, 200)
def test_adicionar_coupom_cheers(self):
socio: Socio = Socio.objects.get(user__username='00000000')
if socio.is_socio:
url = 'https://cheersshop.com.br/codigo'
obj = {
"nome": socio.cpf,
"uso": 1,
"ativo": True,
"desconto_reais": 70 if socio.is_atleta else 65,
"maximo_usuario": "1",
"quantidade": "1",
"usuario": 192061,
"vendedor": "1874",
}
response = requests.post(url, data=obj, headers={
'Authorization': f'Bearer {config("CHEERS_TOKEN")}'})
self.assertEqual(response.json()['status'], 'Success')
| true
| true
|
790706fd037b1cafec446abf5bd47829d039f68b
| 8,665
|
py
|
Python
|
tests/test_root_versioning_integration.py
|
ninox-iot/tuf
|
5115cfc764a8316b5a857ce7d978d9a2b6909e11
|
[
"MIT"
] | null | null | null |
tests/test_root_versioning_integration.py
|
ninox-iot/tuf
|
5115cfc764a8316b5a857ce7d978d9a2b6909e11
|
[
"MIT"
] | null | null | null |
tests/test_root_versioning_integration.py
|
ninox-iot/tuf
|
5115cfc764a8316b5a857ce7d978d9a2b6909e11
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
<Program Name>
test_root_versioning_integration.py
<Author>
Evan Cordell.
<Started>
July 21, 2016.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Test root versioning for efficient root key rotation.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import tempfile
import shutil
import sys
# 'unittest2' required for testing under Python < 2.7.
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
import tuf
import tuf.log
import tuf.formats
import tuf.exceptions
import tuf.roledb
import tuf.keydb
import tuf.repository_tool as repo_tool
import securesystemslib
logger = logging.getLogger('tuf.test_root_versioning')
repo_tool.disable_console_log_messages()
class TestRepository(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temporary_directory)
def tearDown(self):
tuf.roledb.clear_roledb()
tuf.keydb.clear_keydb()
def test_init(self):
# Test normal case.
repository = repo_tool.Repository('repository_directory/',
'metadata_directory/',
'targets_directory/')
self.assertTrue(isinstance(repository.root, repo_tool.Root))
self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot))
self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp))
self.assertTrue(isinstance(repository.targets, repo_tool.Targets))
# Test improperly formatted arguments.
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3,
'metadata_directory/', 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 3, 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 'metadata_directory', 3)
def test_root_role_versioning(self):
# Test root role versioning
#
# 1. Import public and private keys.
# 2. Add verification keys.
# 3. Load signing keys.
# 4. Add target files.
# 5. Perform delegation.
# 6. writeall()
#
# Copy the target files from 'tuf/tests/repository_data' so that writeall()
# has target fileinfo to include in metadata.
temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
targets_directory = os.path.join(temporary_directory, 'repository',
repo_tool.TARGETS_DIRECTORY_NAME)
original_targets_directory = os.path.join('repository_data',
'repository', 'targets')
shutil.copytree(original_targets_directory, targets_directory)
# In this case, create_new_repository() creates the 'repository/'
# sub-directory in 'temporary_directory' if it does not exist.
repository_directory = os.path.join(temporary_directory, 'repository')
metadata_directory = os.path.join(repository_directory,
repo_tool.METADATA_STAGED_DIRECTORY_NAME)
repository = repo_tool.create_new_repository(repository_directory)
# (1) Load the public and private keys of the top-level roles, and one
# delegated role.
keystore_directory = os.path.join('repository_data', 'keystore')
# Load the public keys.
root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub')
targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub')
snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub')
timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub')
role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub')
root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path)
targets_pubkey = repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path)
snapshot_pubkey = \
repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path)
timestamp_pubkey = \
repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path)
role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path)
# Load the private keys.
root_privkey_path = os.path.join(keystore_directory, 'root_key')
targets_privkey_path = os.path.join(keystore_directory, 'targets_key')
snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key')
timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key')
role1_privkey_path = os.path.join(keystore_directory, 'delegation_key')
root_privkey = \
repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password')
targets_privkey = \
repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, 'password')
snapshot_privkey = \
repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path,
'password')
timestamp_privkey = \
repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path,
'password')
role1_privkey = \
repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path,
'password')
# (2) Add top-level verification keys.
repository.root.add_verification_key(root_pubkey)
repository.targets.add_verification_key(targets_pubkey)
repository.snapshot.add_verification_key(snapshot_pubkey)
repository.timestamp.add_verification_key(timestamp_pubkey)
# (3) Load top-level signing keys.
repository.root.load_signing_key(root_privkey)
repository.targets.load_signing_key(targets_privkey)
repository.snapshot.load_signing_key(snapshot_privkey)
repository.timestamp.load_signing_key(timestamp_privkey)
# (4) Add target files.
target1 = os.path.join(targets_directory, 'file1.txt')
target2 = os.path.join(targets_directory, 'file2.txt')
target3 = os.path.join(targets_directory, 'file3.txt')
repository.targets.add_target(target1)
repository.targets.add_target(target2)
# (5) Perform delegation.
repository.targets.delegate('role1', [role1_pubkey], [target3])
repository.targets('role1').load_signing_key(role1_privkey)
# (6) Write repository.
repository.targets.compressions = ['gz']
repository.writeall()
self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json')))
self.assertTrue(os.path.exists(os.path.join(metadata_directory, '1.root.json')))
# Verify that the expected metadata is written.
root_filepath = os.path.join(metadata_directory, 'root.json')
root_1_filepath = os.path.join(metadata_directory, '1.root.json')
root_2_filepath = os.path.join(metadata_directory, '2.root.json')
old_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_1_signable = securesystemslib.util.load_json_file(root_1_filepath)
# Make a change to the root keys
repository.root.add_verification_key(targets_pubkey)
repository.root.load_signing_key(targets_privkey)
repository.root.threshold = 2
repository.writeall()
new_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_2_signable = securesystemslib.util.load_json_file(root_2_filepath)
for role_signable in [old_root_signable, new_root_signable, root_1_signable, root_2_signable]:
# Raise 'securesystemslib.exceptions.FormatError' if 'role_signable' is an
# invalid signable.
tuf.formats.check_signable_object_format(role_signable)
# Verify contents of versioned roots
self.assertEqual(old_root_signable, root_1_signable)
self.assertEqual(new_root_signable, root_2_signable)
self.assertEqual(root_1_signable['signed']['version'], 1)
self.assertEqual(root_2_signable['signed']['version'], 2)
repository.root.remove_verification_key(root_pubkey)
repository.root.unload_signing_key(root_privkey)
repository.root.threshold = 2
# Errors, not enough signing keys to satisfy old threshold
self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
    # No error, write() ignores root's threshold and allows it to be written
# to disk partially signed.
repository.write('root')
if __name__ == '__main__':
unittest.main()
| 37.349138
| 98
| 0.734795
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import logging
import tempfile
import shutil
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
import tuf
import tuf.log
import tuf.formats
import tuf.exceptions
import tuf.roledb
import tuf.keydb
import tuf.repository_tool as repo_tool
import securesystemslib
logger = logging.getLogger('tuf.test_root_versioning')
repo_tool.disable_console_log_messages()
class TestRepository(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temporary_directory = tempfile.mkdtemp(dir=os.getcwd())
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temporary_directory)
def tearDown(self):
tuf.roledb.clear_roledb()
tuf.keydb.clear_keydb()
def test_init(self):
repository = repo_tool.Repository('repository_directory/',
'metadata_directory/',
'targets_directory/')
self.assertTrue(isinstance(repository.root, repo_tool.Root))
self.assertTrue(isinstance(repository.snapshot, repo_tool.Snapshot))
self.assertTrue(isinstance(repository.timestamp, repo_tool.Timestamp))
self.assertTrue(isinstance(repository.targets, repo_tool.Targets))
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository, 3,
'metadata_directory/', 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 3, 'targets_directory')
self.assertRaises(securesystemslib.exceptions.FormatError, repo_tool.Repository,
'repository_directory', 'metadata_directory', 3)
def test_root_role_versioning(self):
temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
targets_directory = os.path.join(temporary_directory, 'repository',
repo_tool.TARGETS_DIRECTORY_NAME)
original_targets_directory = os.path.join('repository_data',
'repository', 'targets')
shutil.copytree(original_targets_directory, targets_directory)
repository_directory = os.path.join(temporary_directory, 'repository')
metadata_directory = os.path.join(repository_directory,
repo_tool.METADATA_STAGED_DIRECTORY_NAME)
repository = repo_tool.create_new_repository(repository_directory)
keystore_directory = os.path.join('repository_data', 'keystore')
root_pubkey_path = os.path.join(keystore_directory, 'root_key.pub')
targets_pubkey_path = os.path.join(keystore_directory, 'targets_key.pub')
snapshot_pubkey_path = os.path.join(keystore_directory, 'snapshot_key.pub')
timestamp_pubkey_path = os.path.join(keystore_directory, 'timestamp_key.pub')
role1_pubkey_path = os.path.join(keystore_directory, 'delegation_key.pub')
root_pubkey = repo_tool.import_rsa_publickey_from_file(root_pubkey_path)
targets_pubkey = repo_tool.import_ed25519_publickey_from_file(targets_pubkey_path)
snapshot_pubkey = \
repo_tool.import_ed25519_publickey_from_file(snapshot_pubkey_path)
timestamp_pubkey = \
repo_tool.import_ed25519_publickey_from_file(timestamp_pubkey_path)
role1_pubkey = repo_tool.import_ed25519_publickey_from_file(role1_pubkey_path)
root_privkey_path = os.path.join(keystore_directory, 'root_key')
targets_privkey_path = os.path.join(keystore_directory, 'targets_key')
snapshot_privkey_path = os.path.join(keystore_directory, 'snapshot_key')
timestamp_privkey_path = os.path.join(keystore_directory, 'timestamp_key')
role1_privkey_path = os.path.join(keystore_directory, 'delegation_key')
root_privkey = \
repo_tool.import_rsa_privatekey_from_file(root_privkey_path, 'password')
targets_privkey = \
repo_tool.import_ed25519_privatekey_from_file(targets_privkey_path, 'password')
snapshot_privkey = \
repo_tool.import_ed25519_privatekey_from_file(snapshot_privkey_path,
'password')
timestamp_privkey = \
repo_tool.import_ed25519_privatekey_from_file(timestamp_privkey_path,
'password')
role1_privkey = \
repo_tool.import_ed25519_privatekey_from_file(role1_privkey_path,
'password')
repository.root.add_verification_key(root_pubkey)
repository.targets.add_verification_key(targets_pubkey)
repository.snapshot.add_verification_key(snapshot_pubkey)
repository.timestamp.add_verification_key(timestamp_pubkey)
repository.root.load_signing_key(root_privkey)
repository.targets.load_signing_key(targets_privkey)
repository.snapshot.load_signing_key(snapshot_privkey)
repository.timestamp.load_signing_key(timestamp_privkey)
target1 = os.path.join(targets_directory, 'file1.txt')
target2 = os.path.join(targets_directory, 'file2.txt')
target3 = os.path.join(targets_directory, 'file3.txt')
repository.targets.add_target(target1)
repository.targets.add_target(target2)
repository.targets.delegate('role1', [role1_pubkey], [target3])
repository.targets('role1').load_signing_key(role1_privkey)
repository.targets.compressions = ['gz']
repository.writeall()
self.assertTrue(os.path.exists(os.path.join(metadata_directory, 'root.json')))
self.assertTrue(os.path.exists(os.path.join(metadata_directory, '1.root.json')))
root_filepath = os.path.join(metadata_directory, 'root.json')
root_1_filepath = os.path.join(metadata_directory, '1.root.json')
root_2_filepath = os.path.join(metadata_directory, '2.root.json')
old_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_1_signable = securesystemslib.util.load_json_file(root_1_filepath)
repository.root.add_verification_key(targets_pubkey)
repository.root.load_signing_key(targets_privkey)
repository.root.threshold = 2
repository.writeall()
new_root_signable = securesystemslib.util.load_json_file(root_filepath)
root_2_signable = securesystemslib.util.load_json_file(root_2_filepath)
for role_signable in [old_root_signable, new_root_signable, root_1_signable, root_2_signable]:
tuf.formats.check_signable_object_format(role_signable)
self.assertEqual(old_root_signable, root_1_signable)
self.assertEqual(new_root_signable, root_2_signable)
self.assertEqual(root_1_signable['signed']['version'], 1)
self.assertEqual(root_2_signable['signed']['version'], 2)
repository.root.remove_verification_key(root_pubkey)
repository.root.unload_signing_key(root_privkey)
repository.root.threshold = 2
self.assertRaises(tuf.exceptions.UnsignedMetadataError, repository.writeall)
repository.write('root')
if __name__ == '__main__':
unittest.main()
| true
| true
|
790707004cff4fdd9940b29429745fc5fa573ac4
| 19,632
|
py
|
Python
|
schicexplorer/scHicCluster.py
|
joachimwolff/scHiCExplorer
|
8aebb444f3968d398c260690c89c9cd0e3186f0e
|
[
"MIT"
] | 10
|
2019-12-09T04:11:18.000Z
|
2021-03-24T15:29:06.000Z
|
schicexplorer/scHicCluster.py
|
joachimwolff/scHiCExplorer
|
8aebb444f3968d398c260690c89c9cd0e3186f0e
|
[
"MIT"
] | 2
|
2020-12-24T12:32:18.000Z
|
2021-01-11T09:03:34.000Z
|
schicexplorer/scHicCluster.py
|
joachimwolff/scHiCExplorer
|
8aebb444f3968d398c260690c89c9cd0e3186f0e
|
[
"MIT"
] | 2
|
2019-12-09T04:11:21.000Z
|
2020-12-24T12:26:46.000Z
|
import argparse
import os
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
from scipy import linalg
import cooler
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from hicmatrix import HiCMatrix as hm
import numpy as np
from scipy.sparse import csr_matrix
from holoviews.plotting.util import process_cmap
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list, create_csr_matrix_all_cells
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
        description='scHicCluster uses kmeans or spectral clustering to associate each cell with a cluster and therefore with its cell-cycle stage. '
        'The clustering can be run on the raw data, on a kNN graph computed via the exact euclidean distance, or on a PCA projection. '
        'Please also consider the other clustering and dimension reduction approaches of the scHicExplorer suite; they can give better results '
        'and can be faster or less memory-demanding.'
)
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--matrix', '-m',
help='The single cell Hi-C interaction matrices to cluster. Needs to be in scool format',
metavar='scool scHi-C matrix',
required=True)
parserRequired.add_argument('--numberOfClusters', '-c',
                                help='Number of clusters to compute',
required=False,
default=12,
type=int)
parserRequired.add_argument('--clusterMethod', '-cm',
help='Algorithm to cluster the Hi-C matrices',
choices=['spectral', 'kmeans'],
default='spectral')
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--chromosomes',
                           help='List of chromosomes to be included',
nargs='+')
parserOpt.add_argument('--intraChromosomalContactsOnly', '-ic',
help='This option loads only the intra-chromosomal contacts. Can improve the cluster result if data is very noisy.',
action='store_true')
parserOpt.add_argument('--additionalPCA', '-pca',
help='Computes PCA on top of a k-nn. Can improve the cluster result.',
action='store_true')
parserOpt.add_argument('--dimensionsPCA', '-dim_pca',
help='The number of dimensions from the PCA matrix that should be considered for clustering. Can improve the cluster result.',
default=20,
type=int)
parserOpt.add_argument('--dimensionReductionMethod', '-drm',
help='Dimension reduction methods, knn with euclidean distance, pca',
choices=['none', 'knn', 'pca'],
default='none')
parserOpt.add_argument('--createScatterPlot', '-csp',
help='Create a scatter plot for the clustering, the x and y are the first and second principal component of the computed k-nn graph.',
required=False,
default=None)
parserOpt.add_argument('--numberOfNearestNeighbors', '-k',
                           help='Number of nearest neighbors to use for the knn graph. The effective value is the smaller of this and the number of provided cells.',
required=False,
default=100,
type=int)
parserOpt.add_argument('--dpi', '-d',
help='The dpi of the scatter plot.',
required=False,
default=300,
type=int)
parserOpt.add_argument('--outFileName', '-o',
help='File name to save the resulting clusters',
required=True,
default='clusters.txt')
parserOpt.add_argument('--cell_coloring_type', '-cct',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--cell_coloring_batch', '-ccb',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--latexTable', '-lt',
                           help='Write the overlap statistics as a latex table if --cell_coloring_type is given.')
parserOpt.add_argument('--figuresize',
                           help='Figure size of the plot (x-size, y-size).',
type=float,
nargs=2,
default=(15, 6),
metavar=('x-size', 'y-size'))
parserOpt.add_argument('--colorMap',
help='Color map to use for the heatmap, supported are the categorical colormaps from holoviews: '
'http://holoviews.org/user_guide/Colormaps.html',
default='glasbey_dark')
parserOpt.add_argument('--fontsize',
help='Fontsize in the plot for x and y axis.',
type=float,
default=15)
parserOpt.add_argument('--threads', '-t',
help='Number of threads. Using the python multiprocessing module.',
required=False,
default=8,
type=int)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
outputFolder = os.path.dirname(os.path.abspath(args.outFileName)) + '/'
log.debug('outputFolder {}'.format(outputFolder))
if args.cell_coloring_type:
cell_name_cell_type_dict = {}
cell_type_color_dict = {}
color_cell_type_dict = {}
cell_type_counter = 0
with open(args.cell_coloring_type, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict[cell_name] = cell_type
if cell_type not in cell_type_color_dict:
cell_type_color_dict[cell_type] = cell_type_counter
color_cell_type_dict[cell_type_counter] = cell_type
cell_type_counter += 1
if args.cell_coloring_batch:
cell_name_cell_type_dict_batch = {}
cell_type_color_dict_batch = {}
color_cell_type_dict_batch = {}
cell_type_counter_batch = 0
with open(args.cell_coloring_batch, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict_batch[cell_name] = cell_type
if cell_type not in cell_type_color_dict_batch:
cell_type_color_dict_batch[cell_type] = cell_type_counter_batch
color_cell_type_dict_batch[cell_type_counter_batch] = cell_type
cell_type_counter_batch += 1
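    # Load all cells of the scool matrix into a single sparse matrix,
    # one flattened cell per row.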
raw_file_name = os.path.splitext(os.path.basename(args.outFileName))[0]
neighborhood_matrix, matrices_list = create_csr_matrix_all_cells(args.matrix, args.threads, args.chromosomes, outputFolder, raw_file_name, args.intraChromosomalContactsOnly)
reduce_to_dimension = neighborhood_matrix.shape[0] - 1
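    # Dimension reduction: either build a k-nn distance graph (optionally
    # followed by a PCA on it) or project the cells onto the leading
    # eigenvectors of their covariance matrix.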
if args.dimensionReductionMethod == 'knn':
if args.numberOfNearestNeighbors > reduce_to_dimension:
args.numberOfNearestNeighbors = reduce_to_dimension
nbrs = NearestNeighbors(n_neighbors=args.numberOfNearestNeighbors, algorithm='ball_tree', n_jobs=args.threads).fit(neighborhood_matrix)
neighborhood_matrix = nbrs.kneighbors_graph(mode='distance')
if args.additionalPCA:
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix = pca.fit_transform(neighborhood_matrix.todense())
if args.dimensionsPCA:
args.dimensionsPCA = min(args.dimensionsPCA, neighborhood_matrix.shape[0])
neighborhood_matrix = neighborhood_matrix[:, :args.dimensionsPCA]
elif args.dimensionReductionMethod == 'pca':
corrmatrix = np.cov(neighborhood_matrix.todense())
evals, eigs = linalg.eig(corrmatrix)
neighborhood_matrix = eigs[:, :reduce_to_dimension].transpose()
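    # Cluster the cells on the (possibly reduced) representation.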
if args.clusterMethod == 'spectral':
spectralClustering_object = SpectralClustering(n_clusters=args.numberOfClusters, n_jobs=args.threads,
n_neighbors=reduce_to_dimension, affinity='nearest_neighbors', random_state=0, eigen_solver="arpack")
labels_clustering = spectralClustering_object.fit_predict(neighborhood_matrix)
elif args.clusterMethod == 'kmeans':
kmeans_object = KMeans(n_clusters=args.numberOfClusters, random_state=0, n_jobs=args.threads, precompute_distances=True)
labels_clustering = kmeans_object.fit_predict(neighborhood_matrix)
if args.colorMap:
colors = process_cmap(args.colorMap)
if args.cell_coloring_type:
if len(colors) < len(cell_type_color_dict):
            log.error('The chosen colormap offers too few values for the number of clusters.')
exit(1)
labels_clustering_cell_type = []
for cell_name in matrices_list:
labels_clustering_cell_type.append(cell_type_color_dict[cell_name_cell_type_dict[cell_name]])
labels_clustering_cell_type = np.array(labels_clustering_cell_type)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
label_x = 'PC1'
label_y = 'PC2'
if args.createScatterPlot:
if args.dimensionReductionMethod == 'none':
            log.warning('Raw matrix clustering scatter plot needs to compute a PCA and can require a large amount (> 100 GB) of memory.')
log.debug('args.additionalPCA {}'.format(args.additionalPCA))
log.debug('args.dimensionReductionMethod {}'.format(args.dimensionReductionMethod))
if args.dimensionReductionMethod == 'none' or (args.dimensionReductionMethod == 'knn' and not args.additionalPCA):
log.debug('compute pca')
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix_knn = pca.fit_transform(neighborhood_matrix.todense())
log.debug('compute pca')
else:
log.debug('already computed pca')
neighborhood_matrix_knn = neighborhood_matrix
if args.cell_coloring_type:
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict)]):
mask = labels_clustering_cell_type == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.cell_coloring_batch:
if len(colors) < len(cell_type_color_dict_batch):
                log.error('The chosen colormap offers too few values for the number of clusters.')
exit(1)
labels_clustering_cell_type_batch = []
for cell_name in matrices_list:
labels_clustering_cell_type_batch.append(cell_type_color_dict_batch[cell_name_cell_type_dict_batch[cell_name]])
labels_clustering_cell_type_batch = np.array(labels_clustering_cell_type_batch)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type_batch)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict_batch)]):
mask = labels_clustering_cell_type_batch == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict_batch[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict_batch[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color_batch.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:args.numberOfClusters]):
mask = labels_clustering == i
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(i), s=20, alpha=0.7)
plt.legend(fontsize=args.fontsize)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.latexTable and args.cell_coloring_type:
        # compute the overlap of cell types with the clusters found
computed_clusters = set(labels_clustering)
cell_type_amounts_dict = {}
# percentage_threshold = 0.8
for threshold in [0.7, 0.8, 0.9]:
cell_type_amounts_dict[threshold] = {}
with open(args.latexTable, 'w') as matches_file:
header = '\\begin{table}[!htb]\n\\footnotesize\n\\begin{tabular}{|l'
body = '\\hline Cluster '
for i in range(len(color_cell_type_dict)):
mask_cell_type = labels_clustering_cell_type == i
header += '|c'
body += '& ' + str(color_cell_type_dict[i]) + ' (' + str(np.sum(mask_cell_type)) + ' cells)'
header += '|}\n'
body += '\\\\\n'
# body = ''
for i in computed_clusters:
body += '\\hline Cluster ' + str(i)
mask_computed_clusters = labels_clustering == i
body += ' (' + str(np.sum(mask_computed_clusters)) + ' cells)'
for j in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == j
mask = mask_computed_clusters & mask_cell_type
number_of_matches = np.sum(mask)
body += '& ' + str(number_of_matches)
if number_of_matches != 1:
body += ' cells / '
else:
body += ' cell / '
body += '{:.2f}'.format((number_of_matches / np.sum(mask_computed_clusters)) * 100) + ' \\% '
for threshold in [0.7, 0.8, 0.9]:
if number_of_matches / np.sum(mask_computed_clusters) >= threshold:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] += number_of_matches
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = number_of_matches
else:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
continue
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = 0
body += '\\\\\n'
body += '\\hline ' + '&' * len(cell_type_color_dict) + '\\\\\n'
for threshold in [0.7, 0.8, 0.9]:
body += '\\hline Correct identified $>{}\\%$'.format(int(threshold * 100))
for i in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == i
if color_cell_type_dict[i] in cell_type_amounts_dict[threshold]:
body += '& ' + str(cell_type_amounts_dict[threshold][color_cell_type_dict[i]]) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format((cell_type_amounts_dict[threshold][color_cell_type_dict[i]] / np.sum(mask_cell_type)) * 100)
else:
body += '& ' + str(0) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format(0 / np.sum(mask_cell_type))
body += ' \\%)'
body += '\\\\\n'
body += '\\hline \n'
body += '\\end{tabular}\n\\caption{}\n\\end{table}'
matches_file.write(header)
matches_file.write(body)
matrices_cluster = list(zip(matrices_list, labels_clustering))
np.savetxt(args.outFileName, matrices_cluster, fmt="%s")
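# ---------------------------------------------------------------------
# Hedged example invocation (illustration only): 'cells.scool' is a
# made-up file name; main() is normally reached through the scHicCluster
# console script.
# ---------------------------------------------------------------------
if __name__ == '__main__':
    main(['--matrix', 'cells.scool',
          '--numberOfClusters', '7',
          '--clusterMethod', 'kmeans',
          '--dimensionReductionMethod', 'knn',
          '--outFileName', 'clusters.txt'])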
| 52.491979
| 195
| 0.596781
|
import argparse
import os
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
from scipy import linalg
import cooler
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from hicmatrix import HiCMatrix as hm
import numpy as np
from scipy.sparse import csr_matrix
from holoviews.plotting.util import process_cmap
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list, create_csr_matrix_all_cells
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
        description='scHicCluster uses kmeans or spectral clustering to associate each cell with a cluster and therefore with its cell-cycle stage. '
        'The clustering can be run on the raw data, on a kNN graph computed via the exact euclidean distance, or on a PCA projection. '
        'Please also consider the other clustering and dimension reduction approaches of the scHicExplorer suite; they can give better results '
        'and can be faster or less memory-demanding.'
)
parserRequired = parser.add_argument_group('Required arguments')
parserRequired.add_argument('--matrix', '-m',
help='The single cell Hi-C interaction matrices to cluster. Needs to be in scool format',
metavar='scool scHi-C matrix',
required=True)
parserRequired.add_argument('--numberOfClusters', '-c',
                                help='Number of clusters to compute',
required=False,
default=12,
type=int)
parserRequired.add_argument('--clusterMethod', '-cm',
help='Algorithm to cluster the Hi-C matrices',
choices=['spectral', 'kmeans'],
default='spectral')
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--chromosomes',
                           help='List of chromosomes to be included',
nargs='+')
parserOpt.add_argument('--intraChromosomalContactsOnly', '-ic',
help='This option loads only the intra-chromosomal contacts. Can improve the cluster result if data is very noisy.',
action='store_true')
parserOpt.add_argument('--additionalPCA', '-pca',
help='Computes PCA on top of a k-nn. Can improve the cluster result.',
action='store_true')
parserOpt.add_argument('--dimensionsPCA', '-dim_pca',
help='The number of dimensions from the PCA matrix that should be considered for clustering. Can improve the cluster result.',
default=20,
type=int)
parserOpt.add_argument('--dimensionReductionMethod', '-drm',
help='Dimension reduction methods, knn with euclidean distance, pca',
choices=['none', 'knn', 'pca'],
default='none')
parserOpt.add_argument('--createScatterPlot', '-csp',
help='Create a scatter plot for the clustering, the x and y are the first and second principal component of the computed k-nn graph.',
required=False,
default=None)
parserOpt.add_argument('--numberOfNearestNeighbors', '-k',
                           help='Number of nearest neighbors to use for the knn graph. The effective value is the smaller of this setting and the number of provided cells.',
required=False,
default=100,
type=int)
parserOpt.add_argument('--dpi', '-d',
help='The dpi of the scatter plot.',
required=False,
default=300,
type=int)
parserOpt.add_argument('--outFileName', '-o',
help='File name to save the resulting clusters',
required=True,
default='clusters.txt')
parserOpt.add_argument('--cell_coloring_type', '-cct',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--cell_coloring_batch', '-ccb',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
required=False)
parserOpt.add_argument('--latexTable', '-lt',
help='Return the overlap statistics if --cell_coloring_type is given as a latex table.')
parserOpt.add_argument('--figuresize',
                           help='Figure size of the plot as width and height.',
type=float,
nargs=2,
default=(15, 6),
metavar=('x-size', 'y-size'))
parserOpt.add_argument('--colorMap',
help='Color map to use for the heatmap, supported are the categorical colormaps from holoviews: '
'http://holoviews.org/user_guide/Colormaps.html',
default='glasbey_dark')
parserOpt.add_argument('--fontsize',
help='Fontsize in the plot for x and y axis.',
type=float,
default=15)
parserOpt.add_argument('--threads', '-t',
help='Number of threads. Using the python multiprocessing module.',
required=False,
default=8,
type=int)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
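# A minimal, hypothetical sketch (not part of the original module) showing how
# the parser above can be exercised programmatically; the file names are invented.
def _example_parse():
    return parse_arguments().parse_args(
        ['--matrix', 'cells.scool', '--outFileName', 'clusters.txt'])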
def main(args=None):
args = parse_arguments().parse_args(args)
outputFolder = os.path.dirname(os.path.abspath(args.outFileName)) + '/'
log.debug('outputFolder {}'.format(outputFolder))
if args.cell_coloring_type:
cell_name_cell_type_dict = {}
cell_type_color_dict = {}
color_cell_type_dict = {}
cell_type_counter = 0
with open(args.cell_coloring_type, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict[cell_name] = cell_type
if cell_type not in cell_type_color_dict:
cell_type_color_dict[cell_type] = cell_type_counter
color_cell_type_dict[cell_type_counter] = cell_type
cell_type_counter += 1
if args.cell_coloring_batch:
cell_name_cell_type_dict_batch = {}
cell_type_color_dict_batch = {}
color_cell_type_dict_batch = {}
cell_type_counter_batch = 0
with open(args.cell_coloring_batch, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict_batch[cell_name] = cell_type
if cell_type not in cell_type_color_dict_batch:
cell_type_color_dict_batch[cell_type] = cell_type_counter_batch
color_cell_type_dict_batch[cell_type_counter_batch] = cell_type
cell_type_counter_batch += 1
raw_file_name = os.path.splitext(os.path.basename(args.outFileName))[0]
neighborhood_matrix, matrices_list = create_csr_matrix_all_cells(args.matrix, args.threads, args.chromosomes, outputFolder, raw_file_name, args.intraChromosomalContactsOnly)
reduce_to_dimension = neighborhood_matrix.shape[0] - 1
if args.dimensionReductionMethod == 'knn':
if args.numberOfNearestNeighbors > reduce_to_dimension:
args.numberOfNearestNeighbors = reduce_to_dimension
nbrs = NearestNeighbors(n_neighbors=args.numberOfNearestNeighbors, algorithm='ball_tree', n_jobs=args.threads).fit(neighborhood_matrix)
neighborhood_matrix = nbrs.kneighbors_graph(mode='distance')
if args.additionalPCA:
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix = pca.fit_transform(neighborhood_matrix.todense())
if args.dimensionsPCA:
args.dimensionsPCA = min(args.dimensionsPCA, neighborhood_matrix.shape[0])
neighborhood_matrix = neighborhood_matrix[:, :args.dimensionsPCA]
elif args.dimensionReductionMethod == 'pca':
corrmatrix = np.cov(neighborhood_matrix.todense())
evals, eigs = linalg.eig(corrmatrix)
neighborhood_matrix = eigs[:, :reduce_to_dimension].transpose()
if args.clusterMethod == 'spectral':
spectralClustering_object = SpectralClustering(n_clusters=args.numberOfClusters, n_jobs=args.threads,
n_neighbors=reduce_to_dimension, affinity='nearest_neighbors', random_state=0, eigen_solver="arpack")
labels_clustering = spectralClustering_object.fit_predict(neighborhood_matrix)
elif args.clusterMethod == 'kmeans':
kmeans_object = KMeans(n_clusters=args.numberOfClusters, random_state=0, n_jobs=args.threads, precompute_distances=True)
labels_clustering = kmeans_object.fit_predict(neighborhood_matrix)
if args.colorMap:
colors = process_cmap(args.colorMap)
if args.cell_coloring_type:
if len(colors) < len(cell_type_color_dict):
            log.error('The chosen colormap offers too few colors for the number of clusters.')
exit(1)
labels_clustering_cell_type = []
for cell_name in matrices_list:
labels_clustering_cell_type.append(cell_type_color_dict[cell_name_cell_type_dict[cell_name]])
labels_clustering_cell_type = np.array(labels_clustering_cell_type)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
label_x = 'PC1'
label_y = 'PC2'
if args.createScatterPlot:
if args.dimensionReductionMethod == 'none':
log.warning('Raw matrix clustering scatter plot needs to compute a PCA and can request large amount (> 100 GB) of memory.')
log.debug('args.additionalPCA {}'.format(args.additionalPCA))
log.debug('args.dimensionReductionMethod {}'.format(args.dimensionReductionMethod))
if args.dimensionReductionMethod == 'none' or (args.dimensionReductionMethod == 'knn' and not args.additionalPCA):
log.debug('compute pca')
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix_knn = pca.fit_transform(neighborhood_matrix.todense())
log.debug('compute pca')
else:
log.debug('already computed pca')
neighborhood_matrix_knn = neighborhood_matrix
if args.cell_coloring_type:
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict)]):
mask = labels_clustering_cell_type == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.cell_coloring_batch:
if len(colors) < len(cell_type_color_dict_batch):
                log.error('The chosen colormap offers too few colors for the number of clusters.')
exit(1)
labels_clustering_cell_type_batch = []
for cell_name in matrices_list:
labels_clustering_cell_type_batch.append(cell_type_color_dict_batch[cell_name_cell_type_dict_batch[cell_name]])
labels_clustering_cell_type_batch = np.array(labels_clustering_cell_type_batch)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type_batch)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict_batch)]):
mask = labels_clustering_cell_type_batch == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict_batch[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict_batch[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color_batch.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:args.numberOfClusters]):
mask = labels_clustering == i
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(i), s=20, alpha=0.7)
plt.legend(fontsize=args.fontsize)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.latexTable and args.cell_coloring_type:
computed_clusters = set(labels_clustering)
cell_type_amounts_dict = {}
for threshold in [0.7, 0.8, 0.9]:
cell_type_amounts_dict[threshold] = {}
with open(args.latexTable, 'w') as matches_file:
header = '\\begin{table}[!htb]\n\\footnotesize\n\\begin{tabular}{|l'
body = '\\hline Cluster '
for i in range(len(color_cell_type_dict)):
mask_cell_type = labels_clustering_cell_type == i
header += '|c'
body += '& ' + str(color_cell_type_dict[i]) + ' (' + str(np.sum(mask_cell_type)) + ' cells)'
header += '|}\n'
body += '\\\\\n'
for i in computed_clusters:
body += '\\hline Cluster ' + str(i)
mask_computed_clusters = labels_clustering == i
body += ' (' + str(np.sum(mask_computed_clusters)) + ' cells)'
for j in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == j
mask = mask_computed_clusters & mask_cell_type
number_of_matches = np.sum(mask)
body += '& ' + str(number_of_matches)
if number_of_matches != 1:
body += ' cells / '
else:
body += ' cell / '
body += '{:.2f}'.format((number_of_matches / np.sum(mask_computed_clusters)) * 100) + ' \\% '
for threshold in [0.7, 0.8, 0.9]:
if number_of_matches / np.sum(mask_computed_clusters) >= threshold:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] += number_of_matches
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = number_of_matches
else:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
continue
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = 0
body += '\\\\\n'
body += '\\hline ' + '&' * len(cell_type_color_dict) + '\\\\\n'
for threshold in [0.7, 0.8, 0.9]:
body += '\\hline Correct identified $>{}\\%$'.format(int(threshold * 100))
for i in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == i
if color_cell_type_dict[i] in cell_type_amounts_dict[threshold]:
body += '& ' + str(cell_type_amounts_dict[threshold][color_cell_type_dict[i]]) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format((cell_type_amounts_dict[threshold][color_cell_type_dict[i]] / np.sum(mask_cell_type)) * 100)
else:
body += '& ' + str(0) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
                        body += '{:.2f}'.format(0.0)
body += ' \\%)'
body += '\\\\\n'
body += '\\hline \n'
body += '\\end{tabular}\n\\caption{}\n\\end{table}'
matches_file.write(header)
matches_file.write(body)
matrices_cluster = list(zip(matrices_list, labels_clustering))
np.savetxt(args.outFileName, matrices_cluster, fmt="%s")
| true
| true
|
7907072f331999b7edbfd2c9b5f70c307d6055ee
| 3,687
|
py
|
Python
|
meta_logger.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | null | null | null |
meta_logger.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | 5
|
2021-03-18T21:07:06.000Z
|
2022-03-11T23:30:49.000Z
|
meta_logger.py
|
rlaboulaye/transformer
|
119195b2be1d2a3418141a73536d5167e97e06ed
|
[
"MIT"
] | null | null | null |
import os
import json
import datetime
import numpy as np
from matplotlib import pyplot as plt
class MetaLogger(object):
def __init__(self, meta_config, config, task_directory, load_directory=None, load_epoch=None):
self.results_directory = os.path.join('meta_results', str(datetime.datetime.now()))
self.results = {
'task_directory': task_directory,
'load_directory': load_directory,
'load_epoch': load_epoch,
'train_losses': [],
'train_accuracies': [],
'validation_losses': [],
'validation_accuracies': [],
'baseline_test_loss': 0,
'baseline_test_accuracy': 0,
'sgd_test_loss': 0,
'sgd_test_accuracy': 0,
'adam_test_loss': 0,
'adam_test_accuracy': 0,
'meta_optimizer_test_loss': 0,
'meta_optimizer_test_accuracy': 0,
'config': config,
'meta_config': meta_config
}
def load(self, file_path):
self.results_directory, _ = os.path.split(file_path)
with open(file_path, 'r') as file_obj:
self.results = json.load(file_obj)
def log(self):
if not os.path.exists(self.results_directory):
os.makedirs(self.results_directory)
with open('{}/results.json'.format(self.results_directory), 'w') as file_obj:
json.dump(self.results, file_obj, indent=4)
def plot(self):
plt.figure()
plt.title('Loss')
plt.xlabel('Meta Epochs')
plt.ylabel('Loss')
plt.xticks(np.arange(0, len(self.results['train_losses']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_losses']) + 1) * .125, .125), self.results['train_losses'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_losses']) + 1) * .125, .125), self.results['validation_losses'], label='validation')
plt.legend()
plt.savefig('{}/loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Accuracy')
plt.xlabel('Meta Epochs')
plt.ylabel('Accuracy')
plt.xticks(np.arange(0, len(self.results['train_accuracies']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_accuracies']) + 1) * .125, .125), self.results['train_accuracies'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_accuracies']) + 1) * .125, .125), self.results['validation_accuracies'], label='validation')
plt.legend()
plt.savefig('{}/accuracy.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Losses')
plt.ylabel('Mean Test Loss')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_loss'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Accuracies')
plt.ylabel('Mean Test Accuracy')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_accuracy'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_accuracy.pdf'.format(self.results_directory))
plt.close()
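# A minimal, hypothetical usage sketch of MetaLogger (the task directory name is
# invented; log() writes <results_directory>/results.json on POSIX systems):
def _example_meta_logger():
    logger = MetaLogger(meta_config={}, config={}, task_directory='tasks/demo')
    logger.results['train_losses'].append(1.23)
    logger.log()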
| 41.897727
| 155
| 0.606184
|
import os
import json
import datetime
import numpy as np
from matplotlib import pyplot as plt
class MetaLogger(object):
def __init__(self, meta_config, config, task_directory, load_directory=None, load_epoch=None):
self.results_directory = os.path.join('meta_results', str(datetime.datetime.now()))
self.results = {
'task_directory': task_directory,
'load_directory': load_directory,
'load_epoch': load_epoch,
'train_losses': [],
'train_accuracies': [],
'validation_losses': [],
'validation_accuracies': [],
'baseline_test_loss': 0,
'baseline_test_accuracy': 0,
'sgd_test_loss': 0,
'sgd_test_accuracy': 0,
'adam_test_loss': 0,
'adam_test_accuracy': 0,
'meta_optimizer_test_loss': 0,
'meta_optimizer_test_accuracy': 0,
'config': config,
'meta_config': meta_config
}
def load(self, file_path):
self.results_directory, _ = os.path.split(file_path)
with open(file_path, 'r') as file_obj:
self.results = json.load(file_obj)
def log(self):
if not os.path.exists(self.results_directory):
os.makedirs(self.results_directory)
with open('{}/results.json'.format(self.results_directory), 'w') as file_obj:
json.dump(self.results, file_obj, indent=4)
def plot(self):
plt.figure()
plt.title('Loss')
plt.xlabel('Meta Epochs')
plt.ylabel('Loss')
plt.xticks(np.arange(0, len(self.results['train_losses']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_losses']) + 1) * .125, .125), self.results['train_losses'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_losses']) + 1) * .125, .125), self.results['validation_losses'], label='validation')
plt.legend()
plt.savefig('{}/loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Accuracy')
plt.xlabel('Meta Epochs')
plt.ylabel('Accuracy')
plt.xticks(np.arange(0, len(self.results['train_accuracies']) * .125, .25))
plt.plot(np.arange(.125, (len(self.results['train_accuracies']) + 1) * .125, .125), self.results['train_accuracies'], label='train')
plt.plot(np.arange(.125, (len(self.results['validation_accuracies']) + 1) * .125, .125), self.results['validation_accuracies'], label='validation')
plt.legend()
plt.savefig('{}/accuracy.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Losses')
plt.ylabel('Mean Test Loss')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_loss'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_loss.pdf'.format(self.results_directory))
plt.close()
plt.figure()
plt.title('Test Accuracies')
plt.ylabel('Mean Test Accuracy')
x_labels = ('Baseline', 'SGD', 'Adam', 'Meta Optimizer')
x_pos = np.arange(len(x_labels))
performance = [self.results['{}_test_accuracy'.format('_'.join(label.lower().split(' ')))] for label in x_labels]
plt.bar(x_pos, performance, align='center', alpha=0.5)
plt.xticks(x_pos, x_labels)
plt.savefig('{}/test_accuracy.pdf'.format(self.results_directory))
plt.close()
| true
| true
|
79070771acf73f2d1f4277d16824ac90848904c4
| 5,619
|
py
|
Python
|
sdno-link-monitor/mie/snmpoper.py
|
openov2/sdno-monitoring
|
7ca338dd34db36cd5a5ec574137578bac656df2a
|
[
"CC-BY-4.0"
] | null | null | null |
sdno-link-monitor/mie/snmpoper.py
|
openov2/sdno-monitoring
|
7ca338dd34db36cd5a5ec574137578bac656df2a
|
[
"CC-BY-4.0"
] | null | null | null |
sdno-link-monitor/mie/snmpoper.py
|
openov2/sdno-monitoring
|
7ca338dd34db36cd5a5ec574137578bac656df2a
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import traceback
import subprocess
from dotdict import DotDict
from xlogger import klog
oid = DotDict({
"ipAdEntAddr": ".1.3.6.1.2.1.4.20.1.1",
"ipAdEntIfIndex": ".1.3.6.1.2.1.4.20.1.2",
"ipAdEntNetMask": ".1.3.6.1.2.1.4.20.1.3",
"ipAdEntBcastAddr": ".1.3.6.1.2.1.4.20.1.4",
"ipAdEntReasmMaxSize": ".1.3.6.1.2.1.4.20.1.5",
"ifIndex": ".1.3.6.1.2.1.2.2.1.1",
"ifDescr": ".1.3.6.1.2.1.2.2.1.2",
"ifType": ".1.3.6.1.2.1.2.2.1.3",
"ifMtu": ".1.3.6.1.2.1.2.2.1.4",
"ifSpeed": ".1.3.6.1.2.1.2.2.1.5",
"ifPhysAddress": ".1.3.6.1.2.1.2.2.1.6",
"ifAdminStatus": ".1.3.6.1.2.1.2.2.1.7",
"ifOperStatus": ".1.3.6.1.2.1.2.2.1.8",
"ifLastChange": ".1.3.6.1.2.1.2.2.1.9",
"ifInOctets": ".1.3.6.1.2.1.2.2.1.10",
"ifInUcastPkts": ".1.3.6.1.2.1.2.2.1.11",
"ifInNUcastPkts": ".1.3.6.1.2.1.2.2.1.12",
"ifInDiscards": ".1.3.6.1.2.1.2.2.1.13",
"ifInErrors": ".1.3.6.1.2.1.2.2.1.14",
"ifInUnknownProtos": ".1.3.6.1.2.1.2.2.1.15",
"ifOutOctets": ".1.3.6.1.2.1.2.2.1.16",
"ifOutUcastPkts": ".1.3.6.1.2.1.2.2.1.17",
"ifOutNUcastPkts": ".1.3.6.1.2.1.2.2.1.18",
"ifOutDiscards": ".1.3.6.1.2.1.2.2.1.19",
"ifOutErrors": ".1.3.6.1.2.1.2.2.1.20",
"ifOutQLen": ".1.3.6.1.2.1.2.2.1.21",
"ifSpecific": ".1.3.6.1.2.1.2.2.1.22",
"ifName": ".1.3.6.1.2.1.31.1.1.1.1",
"ifInMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.2",
"ifInBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.3",
"ifOutMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.4",
"ifOutBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.5",
"ifHCInOctets": ".1.3.6.1.2.1.31.1.1.1.6",
"ifHCInUcastPkts": ".1.3.6.1.2.1.31.1.1.1.7",
"ifHCInMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.8",
"ifHCInBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.9",
"ifHCOutOctets": ".1.3.6.1.2.1.31.1.1.1.10",
"ifHCOutUcastPkts": ".1.3.6.1.2.1.31.1.1.1.11",
"ifHCOutMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.12",
"ifHCOutBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.13",
"ifLinkUpDownTrapEnable": ".1.3.6.1.2.1.31.1.1.1.14",
"ifHighSpeed": ".1.3.6.1.2.1.31.1.1.1.15",
"ifPromiscuousMode": ".1.3.6.1.2.1.31.1.1.1.16",
"ifConnectorPresent": ".1.3.6.1.2.1.31.1.1.1.17",
"ifAlias": ".1.3.6.1.2.1.31.1.1.1.18",
"ifCounterDiscontinuityTime": ".1.3.6.1.2.1.31.1.1.1.19",
# HUAWEI-MPLS-EXTEND-MIB
"hwMplsTunnelStatisticsTunnelIndex": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.1",
"hwMplsTunnelStatisticsIngressLSRId": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.2",
"hwMplsTunnelStatisticsEgressLSRId": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.3",
"hwMplsTunnelStatisticsHCInOctets": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.4",
"hwMplsTunnelStatisticsHCOutOctets": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.5",
})
class SnmpOper():
@classmethod
def splitline(cls, line, oid):
def convert(type, value):
table = {
"Counter32": int,
"Counter64": int,
"Gauge32": int,
"Hex-STRING": str.strip,
"INTEGER": int,
"IpAddress": str,
"OID": str,
"STRING": lambda x: x[1:-1],
"Timeticks": str,
}
return table.get(type, str)(value)
try:
pfxlen = len(oid) + 1
segs = line.split()
            if len(segs) > 3 and segs[0].startswith(oid):
name = segs[0][pfxlen:]
type = segs[2][:-1]
value = convert(type, line.split(":")[1][1:])
return name, type, value
except:
pass
return None, None, "What????"
@classmethod
def subcall(cls, cmd):
try:
return subprocess.check_output(cmd).replace("\r", "\n").split("\n")
except:
klog.e("CMD:%s\r\nBT:%s" % (cmd, traceback.format_exc()))
return []
@classmethod
def get(cls, host, comm, vern, oid):
cmd = ['snmpget', '-Oe', '-On', '-v', vern, '-c', comm, host, oid]
lines = cls.subcall(cmd)
return cls.splitline(lines[0], oid)
@classmethod
def walk(cls, host, comm, vern, oid):
cmd = ['snmpwalk', '-Oe', '-On', '-v', vern, '-c', comm, host, oid]
return cls.subcall(cmd)
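# A minimal, hypothetical usage sketch (assumes the net-snmp CLI tools are
# installed and an SNMP agent is reachable at 192.0.2.1 with community 'public'):
def _example_snmp():
    name, vtype, value = SnmpOper.get('192.0.2.1', 'public', '2c', oid['ifDescr'] + '.1')
    lines = SnmpOper.walk('192.0.2.1', 'public', '2c', oid['ifName'])
    return name, vtype, value, lines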
| 42.89313
| 82
| 0.479267
|
import traceback
import subprocess
from dotdict import DotDict
from xlogger import klog
oid = DotDict({
"ipAdEntAddr": ".1.3.6.1.2.1.4.20.1.1",
"ipAdEntIfIndex": ".1.3.6.1.2.1.4.20.1.2",
"ipAdEntNetMask": ".1.3.6.1.2.1.4.20.1.3",
"ipAdEntBcastAddr": ".1.3.6.1.2.1.4.20.1.4",
"ipAdEntReasmMaxSize": ".1.3.6.1.2.1.4.20.1.5",
"ifIndex": ".1.3.6.1.2.1.2.2.1.1",
"ifDescr": ".1.3.6.1.2.1.2.2.1.2",
"ifType": ".1.3.6.1.2.1.2.2.1.3",
"ifMtu": ".1.3.6.1.2.1.2.2.1.4",
"ifSpeed": ".1.3.6.1.2.1.2.2.1.5",
"ifPhysAddress": ".1.3.6.1.2.1.2.2.1.6",
"ifAdminStatus": ".1.3.6.1.2.1.2.2.1.7",
"ifOperStatus": ".1.3.6.1.2.1.2.2.1.8",
"ifLastChange": ".1.3.6.1.2.1.2.2.1.9",
"ifInOctets": ".1.3.6.1.2.1.2.2.1.10",
"ifInUcastPkts": ".1.3.6.1.2.1.2.2.1.11",
"ifInNUcastPkts": ".1.3.6.1.2.1.2.2.1.12",
"ifInDiscards": ".1.3.6.1.2.1.2.2.1.13",
"ifInErrors": ".1.3.6.1.2.1.2.2.1.14",
"ifInUnknownProtos": ".1.3.6.1.2.1.2.2.1.15",
"ifOutOctets": ".1.3.6.1.2.1.2.2.1.16",
"ifOutUcastPkts": ".1.3.6.1.2.1.2.2.1.17",
"ifOutNUcastPkts": ".1.3.6.1.2.1.2.2.1.18",
"ifOutDiscards": ".1.3.6.1.2.1.2.2.1.19",
"ifOutErrors": ".1.3.6.1.2.1.2.2.1.20",
"ifOutQLen": ".1.3.6.1.2.1.2.2.1.21",
"ifSpecific": ".1.3.6.1.2.1.2.2.1.22",
"ifName": ".1.3.6.1.2.1.31.1.1.1.1",
"ifInMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.2",
"ifInBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.3",
"ifOutMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.4",
"ifOutBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.5",
"ifHCInOctets": ".1.3.6.1.2.1.31.1.1.1.6",
"ifHCInUcastPkts": ".1.3.6.1.2.1.31.1.1.1.7",
"ifHCInMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.8",
"ifHCInBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.9",
"ifHCOutOctets": ".1.3.6.1.2.1.31.1.1.1.10",
"ifHCOutUcastPkts": ".1.3.6.1.2.1.31.1.1.1.11",
"ifHCOutMulticastPkts": ".1.3.6.1.2.1.31.1.1.1.12",
"ifHCOutBroadcastPkts": ".1.3.6.1.2.1.31.1.1.1.13",
"ifLinkUpDownTrapEnable": ".1.3.6.1.2.1.31.1.1.1.14",
"ifHighSpeed": ".1.3.6.1.2.1.31.1.1.1.15",
"ifPromiscuousMode": ".1.3.6.1.2.1.31.1.1.1.16",
"ifConnectorPresent": ".1.3.6.1.2.1.31.1.1.1.17",
"ifAlias": ".1.3.6.1.2.1.31.1.1.1.18",
"ifCounterDiscontinuityTime": ".1.3.6.1.2.1.31.1.1.1.19",
"hwMplsTunnelStatisticsTunnelIndex": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.1",
"hwMplsTunnelStatisticsIngressLSRId": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.2",
"hwMplsTunnelStatisticsEgressLSRId": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.3",
"hwMplsTunnelStatisticsHCInOctets": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.4",
"hwMplsTunnelStatisticsHCOutOctets": ".1.3.6.1.4.1.2011.5.25.121.1.14.1.5",
})
class SnmpOper():
@classmethod
def splitline(cls, line, oid):
def convert(type, value):
table = {
"Counter32": int,
"Counter64": int,
"Gauge32": int,
"Hex-STRING": str.strip,
"INTEGER": int,
"IpAddress": str,
"OID": str,
"STRING": lambda x: x[1:-1],
"Timeticks": str,
}
return table.get(type, str)(value)
try:
pfxlen = len(oid) + 1
segs = line.split()
            if len(segs) > 3 and segs[0].startswith(oid):
name = segs[0][pfxlen:]
type = segs[2][:-1]
value = convert(type, line.split(":")[1][1:])
return name, type, value
except:
pass
return None, None, "What????"
@classmethod
def subcall(cls, cmd):
try:
return subprocess.check_output(cmd).replace("\r", "\n").split("\n")
except:
klog.e("CMD:%s\r\nBT:%s" % (cmd, traceback.format_exc()))
return []
@classmethod
def get(cls, host, comm, vern, oid):
cmd = ['snmpget', '-Oe', '-On', '-v', vern, '-c', comm, host, oid]
lines = cls.subcall(cmd)
return cls.splitline(lines[0], oid)
@classmethod
def walk(cls, host, comm, vern, oid):
cmd = ['snmpwalk', '-Oe', '-On', '-v', vern, '-c', comm, host, oid]
return cls.subcall(cmd)
| true
| true
|
790707b3806087a104bed7112b6abaf0389030df
| 1,142
|
py
|
Python
|
Implementations/Conditional-Variational-Autoencoder/plot_utils.py
|
jaywonchung/Learning-ML
|
5298318686144a78bed42d979e10fbd9979c0159
|
[
"MIT"
] | 10
|
2019-01-18T10:32:36.000Z
|
2022-03-14T08:40:23.000Z
|
Implementations/Conditional-Variational-Autoencoder/plot_utils.py
|
jaywonchung/Learning-ML
|
5298318686144a78bed42d979e10fbd9979c0159
|
[
"MIT"
] | null | null | null |
Implementations/Conditional-Variational-Autoencoder/plot_utils.py
|
jaywonchung/Learning-ML
|
5298318686144a78bed42d979e10fbd9979c0159
|
[
"MIT"
] | null | null | null |
import torchvision
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def display_and_save_batch(title, batch, data, save=True, display=True):
"""Display and save batch of image using plt"""
im = torchvision.utils.make_grid(batch, nrow=int(batch.shape[0]**0.5))
plt.title(title)
plt.imshow(np.transpose(im.cpu().numpy(), (1, 2, 0)), cmap='gray')
if save:
plt.savefig('results/' + title + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show()
def display_and_save_latent(batch, label, data, save=True, display=True):
"""Display and save batch of 2-D latent variable using plt"""
colors = ['black', 'red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'pink', 'violet', 'grey']
z = batch.cpu().detach().numpy()
l = label.cpu().numpy()
plt.title('Latent variables')
plt.scatter(z[:,0], z[:,1], c=l, cmap=matplotlib.colors.ListedColormap(colors))
    plt.xlim(-3, 3)
plt.ylim(-3, 3)
if save:
plt.savefig('results/latent-variable' + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show()
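# A minimal, hypothetical usage sketch with random tensors standing in for real
# model output (assumes torch is installed alongside torchvision):
def _example_plot_utils():
    import torch
    batch = torch.rand(16, 1, 28, 28)
    latent = torch.randn(16, 2)
    labels = torch.randint(0, 10, (16,))
    display_and_save_batch('demo', batch, '-example', save=False)
    display_and_save_latent(latent, labels, '-example', save=False)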
| 39.37931
| 101
| 0.643608
|
import torchvision
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def display_and_save_batch(title, batch, data, save=True, display=True):
im = torchvision.utils.make_grid(batch, nrow=int(batch.shape[0]**0.5))
plt.title(title)
plt.imshow(np.transpose(im.cpu().numpy(), (1, 2, 0)), cmap='gray')
if save:
plt.savefig('results/' + title + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show()
def display_and_save_latent(batch, label, data, save=True, display=True):
colors = ['black', 'red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'pink', 'violet', 'grey']
z = batch.cpu().detach().numpy()
l = label.cpu().numpy()
plt.title('Latent variables')
plt.scatter(z[:,0], z[:,1], c=l, cmap=matplotlib.colors.ListedColormap(colors))
    plt.xlim(-3, 3)
plt.ylim(-3, 3)
if save:
plt.savefig('results/latent-variable' + data + '.png', transparent=True, bbox_inches='tight')
if display:
plt.show()
| true
| true
|
790708d75974359baeaa5c9eaae0b5dd0526eb90
| 178
|
py
|
Python
|
twitter_crawlers/tweepy_crawler/setup.py
|
MCardus/GuruFinder
|
cfa6b9fb0401a0fd9e637c5549b69d49b6b857e5
|
[
"MIT"
] | null | null | null |
twitter_crawlers/tweepy_crawler/setup.py
|
MCardus/GuruFinder
|
cfa6b9fb0401a0fd9e637c5549b69d49b6b857e5
|
[
"MIT"
] | 1
|
2021-06-01T22:28:57.000Z
|
2021-06-01T22:28:57.000Z
|
twitter_crawlers/tweepy_crawler/setup.py
|
MCardus/GuruFinder
|
cfa6b9fb0401a0fd9e637c5549b69d49b6b857e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='tweepy_crawler',
version='0.1',
license='MIT',
long_description=open('README.md').read(),
)
| 16.181818
| 46
| 0.657303
|
from distutils.core import setup
setup(
name='tweepy_crawler',
version='0.1',
license='MIT',
long_description=open('README.md').read(),
)
| true
| true
|
790709c7d3c09f621cff124c60d865458ae55151
| 822
|
py
|
Python
|
orlov/libs/workspace/fixture.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
orlov/libs/workspace/fixture.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
orlov/libs/workspace/fixture.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
""" Orlov Module : workspace module fixture. """
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
""" Workspace Factory Fixture.
Yields:
        directory (Workspace): the created Workspace instance.
"""
logger.debug('Setup of test structure.')
# create screenshot directory
if request.config.getoption('workspace'):
result_dir = request.config.getoption('workspace')
else:
if not os.path.exists('result'):
logger.debug('Creating results folder to store results')
os.mkdir('result')
result_dir = os.path.join(os.getcwd(), 'result')
logger.debug('Created folder %s', result_dir)
yield Workspace(result_dir)
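# Hypothetical sketch of a test consuming the fixture above; pytest injects the
# yielded Workspace instance by parameter name:
def test_workspace_example(workspace):
    assert workspace is not None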
| 27.4
| 68
| 0.678832
|
import os
import logging
import pytest
from orlov.libs.workspace import Workspace
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session')
def workspace(request) -> Workspace:
logger.debug('Setup of test structure.')
if request.config.getoption('workspace'):
result_dir = request.config.getoption('workspace')
else:
if not os.path.exists('result'):
logger.debug('Creating results folder to store results')
os.mkdir('result')
result_dir = os.path.join(os.getcwd(), 'result')
logger.debug('Created folder %s', result_dir)
yield Workspace(result_dir)
| true
| true
|
79070a37c6849ec01a26042f84b09163c6188f06
| 643
|
py
|
Python
|
dataPlotter.py
|
ethantsai/nlwhistlers
|
1b8cabf96e4fbb9a032bb4cd03797d65fe7a144b
|
[
"MIT"
] | 1
|
2021-05-24T20:46:20.000Z
|
2021-05-24T20:46:20.000Z
|
dataPlotter.py
|
ethantsai/nlwhistlers
|
1b8cabf96e4fbb9a032bb4cd03797d65fe7a144b
|
[
"MIT"
] | null | null | null |
dataPlotter.py
|
ethantsai/nlwhistlers
|
1b8cabf96e4fbb9a032bb4cd03797d65fe7a144b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
lines = open("for_james.csv").read().splitlines()
data = [[float(x) for x in lines[i].split(", ")] for i in range(len(lines))]
# each item in data is a list of floats that can be passed to plt.hist
for i in range(9):
plt.hist(data[i], bins=np.logspace(1, 3, 20))
plt.title(f'Precipitating Energy Distribution at t = {i+0.5} sec')
plt.xscale("log"); plt.yscale("log"); plt.xlabel('Energy (KeV)'); plt.ylabel('Number of Particles')
plt.ylim(10,600); plt.xlim(10,1000)
plt.savefig(f'results/plots/preciphist{i}.png')
plt.clf()
| 40.1875
| 103
| 0.682737
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
lines = open("for_james.csv").read().splitlines()
data = [[float(x) for x in lines[i].split(", ")] for i in range(len(lines))]
for i in range(9):
plt.hist(data[i], bins=np.logspace(1, 3, 20))
plt.title(f'Precipitating Energy Distribution at t = {i+0.5} sec')
plt.xscale("log"); plt.yscale("log"); plt.xlabel('Energy (KeV)'); plt.ylabel('Number of Particles')
plt.ylim(10,600); plt.xlim(10,1000)
plt.savefig(f'results/plots/preciphist{i}.png')
plt.clf()
| true
| true
|
79070a39084122784f83decadf7c9b2e86fcb249
| 4,016
|
py
|
Python
|
transformers4rec/tf/block/dlrm.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 415
|
2021-09-20T20:47:34.000Z
|
2022-03-31T16:51:03.000Z
|
transformers4rec/tf/block/dlrm.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 128
|
2021-09-21T07:19:38.000Z
|
2022-03-31T15:08:27.000Z
|
transformers4rec/tf/block/dlrm.py
|
Jwmc999/Transformers4Rec
|
e6cdf13a7c0102303c0258120274f88b2d42c9c2
|
[
"Apache-2.0"
] | 44
|
2021-09-23T07:25:36.000Z
|
2022-03-29T04:17:53.000Z
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Optional, Union, cast
import tensorflow as tf
from merlin_standard_lib import Schema, Tag
from ..features.continuous import ContinuousFeatures
from ..features.embedding import EmbeddingFeatures
from ..tabular.base import TabularBlock
from .base import Block, BlockType
class ExpandDimsAndToTabular(tf.keras.layers.Lambda):
def __init__(self, **kwargs):
super().__init__(lambda x: dict(continuous=x), **kwargs)
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class DLRMBlock(Block):
def __init__(
self,
continuous_features: Union[List[str], Schema, Optional[TabularBlock]],
embedding_layer: EmbeddingFeatures,
bottom_mlp: BlockType,
top_mlp: Optional[BlockType] = None,
interaction_layer: Optional[tf.keras.layers.Layer] = None,
**kwargs
):
super().__init__(**kwargs)
_continuous_features: Optional[TabularBlock]
if isinstance(continuous_features, Schema):
_continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
cast(Schema, continuous_features), aggregation="concat"
),
)
        elif isinstance(continuous_features, list):
_continuous_features = ContinuousFeatures.from_features(
continuous_features, aggregation="concat"
)
else:
_continuous_features = cast(Optional[TabularBlock], continuous_features)
if _continuous_features:
continuous_embedding = _continuous_features >> bottom_mlp >> ExpandDimsAndToTabular()
continuous_embedding.block_name = "ContinuousEmbedding"
self.stack_features = embedding_layer.merge(continuous_embedding, aggregation="stack")
else:
embedding_layer.set_aggregation("stack")
self.stack_features = embedding_layer
# self.stack_features = tabular.MergeTabular(embedding_layer, continuous_embedding,
# aggregation_registry="stack")
# self.stack_features = embedding_layer + continuous_embedding
# self.stack_features.aggregation_registry = "stack"
from ..layers import DotProductInteraction
self.interaction_layer = interaction_layer or DotProductInteraction()
self.top_mlp = top_mlp
@classmethod
def from_schema(
cls, schema: Schema, bottom_mlp: BlockType, top_mlp: Optional[BlockType] = None, **kwargs
):
embedding_layer = EmbeddingFeatures.from_schema(
schema.select_by_tag(Tag.CATEGORICAL),
infer_embedding_sizes=False,
embedding_dim_default=bottom_mlp.layers[-1].units,
)
if not embedding_layer:
raise ValueError("embedding_layer must be set.")
continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
schema.select_by_tag(Tag.CONTINUOUS), aggregation="concat"
),
)
return cls(continuous_features, embedding_layer, bottom_mlp, top_mlp=top_mlp, **kwargs)
def call(self, inputs, **kwargs):
stacked = self.stack_features(inputs)
interactions = self.interaction_layer(stacked)
return interactions if not self.top_mlp else self.top_mlp(interactions)
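# A rough, hypothetical sketch (plain TensorFlow, independent of the library's
# DotProductInteraction) of the pairwise dot-product interaction idea used above:
def _example_pairwise_interaction():
    x = tf.random.normal([8, 5, 16])          # (batch, features, embedding_dim)
    return tf.matmul(x, x, transpose_b=True)  # (batch, features, features) similarities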
| 37.185185
| 98
| 0.680777
|
from typing import List, Optional, Union, cast
import tensorflow as tf
from merlin_standard_lib import Schema, Tag
from ..features.continuous import ContinuousFeatures
from ..features.embedding import EmbeddingFeatures
from ..tabular.base import TabularBlock
from .base import Block, BlockType
class ExpandDimsAndToTabular(tf.keras.layers.Lambda):
def __init__(self, **kwargs):
super().__init__(lambda x: dict(continuous=x), **kwargs)
@tf.keras.utils.register_keras_serializable(package="transformers4rec")
class DLRMBlock(Block):
def __init__(
self,
continuous_features: Union[List[str], Schema, Optional[TabularBlock]],
embedding_layer: EmbeddingFeatures,
bottom_mlp: BlockType,
top_mlp: Optional[BlockType] = None,
interaction_layer: Optional[tf.keras.layers.Layer] = None,
**kwargs
):
super().__init__(**kwargs)
_continuous_features: Optional[TabularBlock]
if isinstance(continuous_features, Schema):
_continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
cast(Schema, continuous_features), aggregation="concat"
),
)
        elif isinstance(continuous_features, list):
_continuous_features = ContinuousFeatures.from_features(
continuous_features, aggregation="concat"
)
else:
_continuous_features = cast(Optional[TabularBlock], continuous_features)
if _continuous_features:
continuous_embedding = _continuous_features >> bottom_mlp >> ExpandDimsAndToTabular()
continuous_embedding.block_name = "ContinuousEmbedding"
self.stack_features = embedding_layer.merge(continuous_embedding, aggregation="stack")
else:
embedding_layer.set_aggregation("stack")
self.stack_features = embedding_layer
from ..layers import DotProductInteraction
self.interaction_layer = interaction_layer or DotProductInteraction()
self.top_mlp = top_mlp
@classmethod
def from_schema(
cls, schema: Schema, bottom_mlp: BlockType, top_mlp: Optional[BlockType] = None, **kwargs
):
embedding_layer = EmbeddingFeatures.from_schema(
schema.select_by_tag(Tag.CATEGORICAL),
infer_embedding_sizes=False,
embedding_dim_default=bottom_mlp.layers[-1].units,
)
if not embedding_layer:
raise ValueError("embedding_layer must be set.")
continuous_features = cast(
Optional[TabularBlock],
ContinuousFeatures.from_schema(
schema.select_by_tag(Tag.CONTINUOUS), aggregation="concat"
),
)
return cls(continuous_features, embedding_layer, bottom_mlp, top_mlp=top_mlp, **kwargs)
def call(self, inputs, **kwargs):
stacked = self.stack_features(inputs)
interactions = self.interaction_layer(stacked)
return interactions if not self.top_mlp else self.top_mlp(interactions)
| true
| true
|
79070a691786b7d34291df055a14a81595740595
| 75
|
py
|
Python
|
python__OOP/09.inheritance_exercise/01.person/child.py
|
EmilianStoyanov/Projects-in-SoftUni
|
e83996670fe00424a158905d537a7bbbeee8fb59
|
[
"MIT"
] | 1
|
2020-07-14T12:32:47.000Z
|
2020-07-14T12:32:47.000Z
|
python__OOP/09.inheritance_exercise/01.person/child.py
|
EmilianStoyanov/Projects-in-SoftUni
|
e83996670fe00424a158905d537a7bbbeee8fb59
|
[
"MIT"
] | null | null | null |
python__OOP/09.inheritance_exercise/01.person/child.py
|
EmilianStoyanov/Projects-in-SoftUni
|
e83996670fe00424a158905d537a7bbbeee8fb59
|
[
"MIT"
] | null | null | null |
from Person_1.project.person import Person
class Child(Person):
pass
| 12.5
| 42
| 0.76
|
from Person_1.project.person import Person
class Child(Person):
pass
| true
| true
|
79070a6bbd847ff83c3a657630a4d6d85784302a
| 1,363
|
py
|
Python
|
mvpa2/tests/test_misc_plot.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_misc_plot.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/tests/test_misc_plot.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA misc.plot"""
from mvpa2.testing import *
skip_if_no_external("pylab")
import pylab as pl
from matplotlib.figure import Figure
from mvpa2.misc.plot.base import plot_dataset_chunks
import numpy as np
from glob import glob
from mock import patch
from os.path import join as pjoin
data2d = np.random.randn(2, 4, 4)
data3d = np.random.randn(3, 4, 4)
data2d_3d = np.random.randn(2, 4, 4, 4)
data2d_4d = np.random.randn(2, 4, 4, 4, 2)
data2d_5d = np.random.randn(2, 4, 4, 4, 2, 3)
from mvpa2.testing.datasets import datasets
@sweepargs(dsp=list(datasets.items()))
def test_plot_dataset_chunks(dsp):
dsname, ds = dsp
if ds.targets.dtype.kind == "f":
return
# smoke test for now
if "chunks" not in ds.sa:
return # nothing to plot in this one
print(dsname)
plot_dataset_chunks(ds[:, :2]) # could only plot two
pl.close(pl.gcf())
if ds.nfeatures > 2:
assert_raises(ValueError, plot_dataset_chunks, ds)
| 29
| 78
| 0.612619
| true
| true
|
|
79070aec9110a122228a870b6404344568d47a77
| 3,680
|
py
|
Python
|
spider/python/tutorial/pipelines.py
|
ferryhang/spider_job
|
871309c86df8dd9abc37798415686344242210e2
|
[
"MIT"
] | 322
|
2018-08-06T17:44:23.000Z
|
2022-03-31T02:42:54.000Z
|
spider/python/tutorial/pipelines.py
|
ferryhang/spider_job
|
871309c86df8dd9abc37798415686344242210e2
|
[
"MIT"
] | 10
|
2019-03-16T03:57:17.000Z
|
2022-03-17T07:51:17.000Z
|
spider/python/tutorial/pipelines.py
|
ferryhang/spider_job
|
871309c86df8dd9abc37798415686344242210e2
|
[
"MIT"
] | 116
|
2018-08-07T02:02:28.000Z
|
2022-03-24T08:15:55.000Z
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import datetime
from scrapy.conf import settings
# Education levels (the values stay in Chinese because they are matched against scraped text)
educations = ("不限","大专","本科","硕士","博士")
# Fix the education level: some postings state a level that is clearly
# inconsistent with the posting body, so recover it from the body text instead.
def clean_education(edu,body):
    if edu not in educations:
        edu = '不限'
        for i in educations:
            if i in body:
                edu = i
                break
    return edu
def clear_salary(salary):
res = salary.split("-")
temp = []
for x in res:
temp.append(int(x.upper().replace("K"," "))*1000)
result = {
"min":temp[0],
"max":temp[1],
"avg":int((temp[0]+temp[1])/2)
}
return result
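# Illustrative check with a hypothetical input in Boss Zhipin's "15k-25k" format;
# expected result: {'min': 15000, 'max': 25000, 'avg': 20000}.
def _example_clear_salary():
    return clear_salary('15k-25k')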
def clear_time(time):
now_year = datetime.datetime.now().year
if '发布于' in time:
time = time.replace("发布于", str(now_year)+"-")
time = time.replace("月", "-")
time = time.replace("日", "")
if time.find("昨天") > 0:
time = str(datetime.date.today() - datetime.timedelta(days=1))
elif time.find(":") > 0:
time = str(datetime.date.today())
return time
def clear_position(name):
data = name.split(" ")
name = data[0]
work_year = data[-2]
educational = data[-1]
return name,work_year,educational
# Keep only positions whose title mentions PHP; the JD body is not consulted
# because many JDs mention PHP incidentally or inaccurately.
def clean_name(name):
    return "PHP" in name.upper()
class TutorialPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position2']
collection.insert(dict(item))
client.close()
return item
# Pipeline for Boss Zhipin (zhipin.com) data
class ZhipinPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
item['salary'] = clear_salary(item['salary'])
item['create_time'] = clear_time(item['create_time'])
item['educational'] = clean_education(item['educational'],item['body'])
is_php = clean_name(item['position_name'])
if is_php is True:
collection.insert(dict(item))
client.close()
return item
# Pipeline for 51job data
class FiveJobPipeline(object):
def clear_salary(self,salary):
lists = salary.split("/")[0].split('-')
min,max = lists
unit = 10000
if "千" in max:
unit = 1000
max = max.replace("千","")
else:
max = max.replace("万","")
print(max)
result = {}
result['min'] = float(min)*unit
result['max'] = float(max)*unit
result['avg'] = (result['max']+result['min'])/2
return result
def clear_address(self,address):
if "上班地址" in address:
address = address.replace("上班地址 :"," ")
return address
    def clear_workyear(self,work_year):
        if "经验" in work_year:
            work_year = work_year.replace("工作经验"," ").replace("经验"," ")
        return work_year
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['51job']
item['salary'] = self.clear_salary(salary=item['salary'])
item['address'] = self.clear_address(address=item['address'])
item['work_year'] = self.clear_workyear(work_year=item['work_year'])
collection.insert(dict(item))
client.close()
return item
| 29.44
| 84
| 0.580435
|
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
import datetime
from scrapy.conf import settings
# Education levels (the values stay in Chinese because they are matched against scraped text)
educations = ("不限","大专","本科","硕士","博士")
# Fix the education level: some postings state a level that is clearly
# inconsistent with the posting body, so recover it from the body text instead.
def clean_education(edu,body):
    if edu not in educations:
        edu = '不限'
        for i in educations:
            if i in body:
                edu = i
                break
    return edu
def clear_salary(salary):
res = salary.split("-")
temp = []
for x in res:
temp.append(int(x.upper().replace("K"," "))*1000)
result = {
"min":temp[0],
"max":temp[1],
"avg":int((temp[0]+temp[1])/2)
}
return result
def clear_time(time):
now_year = datetime.datetime.now().year
if '发布于' in time:
time = time.replace("发布于", str(now_year)+"-")
time = time.replace("月", "-")
time = time.replace("日", "")
if time.find("昨天") > 0:
time = str(datetime.date.today() - datetime.timedelta(days=1))
elif time.find(":") > 0:
time = str(datetime.date.today())
return time
def clear_position(name):
data = name.split(" ")
name = data[0]
work_year = data[-2]
educational = data[-1]
return name,work_year,educational
# Keep only positions whose title mentions PHP; the JD body is not consulted
# because many JDs mention PHP incidentally or inaccurately.
def clean_name(name):
    return "PHP" in name.upper()
class TutorialPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position2']
collection.insert(dict(item))
client.close()
return item
# Pipeline for Boss Zhipin (zhipin.com) data
class ZhipinPipeline(object):
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['position']
item['salary'] = clear_salary(item['salary'])
item['create_time'] = clear_time(item['create_time'])
item['educational'] = clean_education(item['educational'],item['body'])
is_php = clean_name(item['position_name'])
if is_php is True:
collection.insert(dict(item))
client.close()
return item
# Pipeline for 51job data
class FiveJobPipeline(object):
def clear_salary(self,salary):
lists = salary.split("/")[0].split('-')
min,max = lists
unit = 10000
if "千" in max:
unit = 1000
max = max.replace("千","")
else:
max = max.replace("万","")
print(max)
result = {}
result['min'] = float(min)*unit
result['max'] = float(max)*unit
result['avg'] = (result['max']+result['min'])/2
return result
def clear_address(self,address):
if "上班地址" in address:
address = address.replace("上班地址 :"," ")
return address
    def clear_workyear(self,work_year):
        if "经验" in work_year:
            work_year = work_year.replace("工作经验"," ").replace("经验"," ")
        return work_year
def process_item(self, item, spider):
client = pymongo.MongoClient(host="127.0.0.1", port=27017)
db = client['job']
collection = db['51job']
item['salary'] = self.clear_salary(salary=item['salary'])
item['address'] = self.clear_address(address=item['address'])
item['work_year'] = self.clear_workyear(work_year=item['work_year'])
collection.insert(dict(item))
client.close()
return item
| true
| true
|
79070bbbfd11ab08a9c1e746c305db66b5ba891c
| 87,593
|
py
|
Python
|
pandas/core/arrays/categorical.py
|
getschomp/pandas
|
85dc1713bc6a1064f4afdf2a907bc9c72cdc364b
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T00:35:27.000Z
|
2019-01-31T00:35:27.000Z
|
pandas/core/arrays/categorical.py
|
getschomp/pandas
|
85dc1713bc6a1064f4afdf2a907bc9c72cdc364b
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/arrays/categorical.py
|
getschomp/pandas
|
85dc1713bc6a1064f4afdf2a907bc9c72cdc364b
|
[
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import textwrap
from pandas import compat
from pandas.compat import u, lzip
from pandas._libs import lib, algos as libalgos
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.cast import (
maybe_infer_to_datetimelike,
coerce_indexer_dtype)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_platform_int,
is_extension_array_dtype,
is_dtype_equal,
is_datetimelike,
is_datetime64_dtype,
is_timedelta64_dtype,
is_categorical,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
is_list_like, is_sequence,
is_scalar, is_iterator,
is_dict_like)
from pandas.core.algorithms import factorize, take_1d, unique1d, take
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.base import (PandasObject,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender, cache_readonly, deprecate_kwarg, Substitution)
import pandas.core.algorithms as algorithms
from pandas.io.formats import console
from pandas.io.formats.terminal import get_terminal_size
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.config import get_option
from .base import ExtensionArray
_take_msg = textwrap.dedent("""\
Interpreting negative values in 'indexer' as missing values.
In the future, this will change to meaning positional indices
from the right.
Use 'allow_fill=True' to retain the previous behavior and silence this
warning.
Use 'allow_fill=False' to accept the new behavior.""")
def _cat_compare_op(op):
def f(self, other):
        # On python2, you can usually compare any type to any type, and
        # Categoricals can be seen as a custom type, but having different
        # results depending on whether the categories are the same or not is
        # kind of insane, so be a bit stricter here and use the python3 idea
        # of comparing only things of equal type.
if isinstance(other, ABCSeries):
return NotImplemented
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
            # Two Categoricals can only be compared if the categories are
# the same (maybe up to ordering, depending on ordered)
msg = ("Categoricals can only be compared if "
"'categories' are the same.")
if len(self.categories) != len(other.categories):
raise TypeError(msg + " Categories are different lengths")
elif (self.ordered and not (self.categories ==
other.categories).all()):
raise TypeError(msg)
elif not set(self.categories) == set(other.categories):
raise TypeError(msg)
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
if not self.ordered and not self.categories.equals(
other.categories):
# both unordered and different order
other_codes = _get_codes_for_values(other, self.categories)
else:
other_codes = other._codes
na_mask = (self._codes == -1) | (other_codes == -1)
f = getattr(self._codes, op)
ret = f(other_codes)
if na_mask.any():
                # In other Series, this leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if is_scalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
    This is a helper method for :meth:`__contains__`
    and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
    cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
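# A short sketch of the ``container`` check above (illustrative only): a key
# can be a valid category while its code never occurs in ``container``.
#
#   >>> cat = pd.Categorical(['a'], categories=['a', 'b'])
#   >>> contains(cat, 'a', container=cat._codes)
#   True
#   >>> contains(cat, 'b', container=cat._codes)   # 'b' has no code in use
#   False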
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
class Categorical(ExtensionArray, PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
    `Categoricals` can only take on a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of `values` (sorted, if
possible, otherwise in the order in which they appear).
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered categorical.
If True, the resulting categorical will be ordered.
An ordered categorical respects, when sorted, the order of its
`categories` attribute (which in turn is the `categories` argument, if
provided).
dtype : CategoricalDtype
An instance of ``CategoricalDtype`` to use for this categorical
.. versionadded:: 0.21.0
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
dtype : CategoricalDtype
The instance of ``CategoricalDtype`` storing the ``categories``
and ``ordered``.
.. versionadded:: 0.21.0
Methods
-------
from_codes
__array__
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> pd.Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]
>>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a, b, c]
Ordered `Categoricals` can be sorted according to the custom order
of the categories and can have a min and max value.
>>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True,
... categories=['c', 'b', 'a'])
>>> c
[a, b, c, a, b, c]
Categories (3, object): [c < b < a]
>>> c.min()
'c'
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more.
See also
--------
pandas.api.types.CategoricalDtype : Type for categorical data
CategoricalIndex : An Index with an underlying ``Categorical``
"""
    # For comparisons, so that numpy uses our implementation of the compare
    # ops, which raise for unsupported comparisons
__array_priority__ = 1000
_dtype = CategoricalDtype(ordered=False)
_deprecations = frozenset(['labels'])
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=None, dtype=None,
fastpath=False):
# Ways of specifying the dtype (prioritized ordered)
# 1. dtype is a CategoricalDtype
# a.) with known categories, use dtype.categories
# b.) else with Categorical values, use values.dtype
# c.) else, infer from values
# d.) specifying dtype=CategoricalDtype and categories is an error
# 2. dtype is a string 'category'
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
# 3. dtype is None
# a.) use categories, ordered
# b.) use values.dtype
# c.) infer from values
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown `dtype` {dtype}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify both `dtype` and `categories`"
" or `ordered`.")
categories = dtype.categories
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype
dtype = CategoricalDtype(categories, ordered)
# At this point, dtype is always a CategoricalDtype
# if dtype.categories is None, we are inferring
if fastpath:
self._codes = coerce_indexer_dtype(values, categories)
self._dtype = self._dtype.update_dtype(dtype)
return
# null_mask indicates missing values we want to exclude from inference.
# This means: only missing values in list-likes (not arrays/ndframes).
null_mask = np.array(False)
# sanitize input
if is_categorical_dtype(values):
if dtype.categories is None:
dtype = CategoricalDtype(values.categories, dtype.ordered)
elif not isinstance(values, (ABCIndexClass, ABCSeries)):
# _sanitize_array coerces np.nan to a string under certain versions
# of numpy
values = maybe_infer_to_datetimelike(values, convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# By convention, empty lists result in object dtype:
if len(values) == 0:
sanitize_dtype = 'object'
else:
sanitize_dtype = None
null_mask = isna(values)
if null_mask.any():
values = [values[idx] for idx in np.where(~null_mask)[0]]
values = _sanitize_array(values, None, dtype=sanitize_dtype)
if dtype.categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if dtype.ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
# we're inferring from values
dtype = CategoricalDtype(categories, dtype.ordered)
elif is_categorical_dtype(values):
old_codes = (values.cat.codes if isinstance(values, ABCSeries)
else values.codes)
codes = _recode_for_categories(old_codes, values.dtype.categories,
dtype.categories)
else:
codes = _get_codes_for_values(values, dtype.categories)
if null_mask.any():
# Reinsert -1 placeholders for previously removed missing values
full_codes = - np.ones(null_mask.shape, dtype=codes.dtype)
full_codes[~null_mask] = codes
codes = full_codes
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)
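    # A minimal sketch of the dtype-precedence rules spelled out in
    # ``__init__`` above (illustrative only):
    #
    #   >>> from pandas.api.types import CategoricalDtype
    #   >>> dtype = CategoricalDtype(['b', 'a'], ordered=True)
    #   >>> pd.Categorical(['a', 'b', 'a'], dtype=dtype)           # rule 1a
    #   [a, b, a]
    #   Categories (2, object): [b < a]
    #   >>> pd.Categorical(['a'], categories=['a'], dtype=dtype)   # rule 1d
    #   Traceback (most recent call last):
    #       ...
    #   ValueError: Cannot specify both `dtype` and `categories` or `ordered`.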
@property
def categories(self):
"""The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
        Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the
            number of new categories does not equal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
return self.dtype.categories
@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (self.dtype.categories is not None and
len(self.dtype.categories) != len(new_dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items as the old categories!")
self._dtype = new_dtype
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self.dtype.ordered
@property
def dtype(self):
"""The :class:`~pandas.api.types.CategoricalDtype` for this instance"""
return self._dtype
@property
def _ndarray_values(self):
return self.codes
@property
def _constructor(self):
return Categorical
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)
def copy(self):
""" Copy constructor. """
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True)
def astype(self, dtype, copy=True):
"""
Coerce this type to another dtype
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and dtype is categorical, the original
object is returned.
.. versionadded:: 0.19.0
"""
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return list(self)
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype):
"""Construct a Categorical from inferred values
For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.
Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
Returns
-------
Categorical
"""
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
            # Convert to a specialized type with `dtype` if specified
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors='coerce')
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors='coerce')
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors='coerce')
if known_categories:
# recode from observation order to dtype.categories order
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# sort categories and recode for unknown categories
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True)
@classmethod
def from_codes(cls, codes, categories, ordered=False):
"""
Make a Categorical type from codes and categories arrays.
        This constructor is useful if you already have codes and categories
        and so do not need the (computationally intensive) factorization
        step, which is usually done when calling the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
        Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
try:
codes = coerce_indexer_dtype(codes, categories)
except (ValueError, TypeError):
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = CategoricalDtype.validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, categories=categories, ordered=ordered,
fastpath=True)
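    # A minimal usage sketch for ``from_codes`` (illustrative only): -1 codes
    # become NaN and no factorization of the values is performed.
    #
    #   >>> pd.Categorical.from_codes([0, 1, -1, 1], categories=['a', 'b'])
    #   [a, b, NaN, b]
    #   Categories (2, object): [a, b]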
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
            A non-writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _set_categories(self, categories, fastpath=False):
""" Sets new categories inplace
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]
>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype
def _set_dtype(self, dtype):
"""Internal method for directly updating the CategoricalDtype
Parameters
----------
dtype : CategoricalDtype
Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
"""
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace)
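    # A minimal sketch of toggling orderedness (illustrative only): only the
    # ``ordered`` flag flips; the category order itself is unchanged.
    #
    #   >>> cat = pd.Categorical(['a', 'b'], categories=['b', 'a'])
    #   >>> cat.as_ordered()
    #   [a, b]
    #   Categories (2, object): [b < a]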
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
        set to NaN). If `rename==True`, the categories will simply be renamed
        (fewer or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
        On the other hand this method does not run any checks (e.g., whether
        the old categories are included in the new categories on a reorder),
        which can result in surprising changes, for example when using special
        string dtypes on python3, which do not consider an S1 string equal to
        a single-char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
        Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(self.codes, self.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat
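    # A minimal sketch of ``set_categories`` (illustrative only): values
    # outside the new categories become NaN unless ``rename=True``.
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'c'])
    #   >>> cat.set_categories(['c', 'b'])
    #   [NaN, b, c]
    #   Categories (2, object): [c, b]
    #   >>> cat.set_categories(['x', 'y', 'z'], rename=True)
    #   [x, y, z]
    #   Categories (3, object): [x, y, z]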
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
Raises
------
ValueError
If new categories are list-like and do not have the same number of
            items as the current categories or do not validate as categories
Parameters
----------
new_categories : list-like, dict-like or callable
* list-like: all items must be unique and the number of items in
the new categories must match the existing number of categories.
* dict-like: specifies a mapping from
old categories to new. Categories not contained in the mapping
are passed through and extra categories in the mapping are
ignored.
.. versionadded:: 0.21.0
* callable : a callable that is called on all items in the old
categories and whose return values comprise the new categories.
.. versionadded:: 0.23.0
.. warning::
Currently, Series are considered list like. In a future version
of pandas they'll be considered dict-like.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical or None
With ``inplace=False``, the new categorical is returned.
With ``inplace=True``, there is no return value.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]
For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through
>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]
You may also provide a callable to create the new categories
>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
        Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat
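    # A minimal sketch of ``add_categories`` (illustrative only): the new
    # category is appended and starts out unused.
    #
    #   >>> cat = pd.Categorical(['a', 'b'])
    #   >>> cat.add_categories(['c'])
    #   [a, b]
    #   Categories (3, object): [a, b, c]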
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat
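    # A minimal sketch of ``remove_unused_categories`` (illustrative only):
    #
    #   >>> cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
    #   >>> cat.remove_unused_categories()
    #   [a, b]
    #   Categories (2, object): [a, b]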
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return self.from_codes(codes, categories=self.categories,
ordered=self.ordered)
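    # A minimal sketch of ``shift`` (illustrative only): vacated positions
    # get code -1, i.e. NaN.
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'c'])
    #   >>> cat.shift(1)
    #   [NaN, a, b]
    #   Categories (3, object): [a, b, c]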
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
if is_extension_array_dtype(ret):
# When we're a Categorical[ExtensionArray], like Interval,
            # we need to ensure __array__ gets all the way to an
# ndarray.
ret = np.asarray(ret)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_categories' not in state and '_levels' in state:
state['_categories'] = self.dtype.validate_categories(state.pop(
'_levels'))
if '_codes' not in state and 'labels' in state:
state['_codes'] = coerce_indexer_dtype(
state.pop('labels'), state['_categories'])
# 0.16.0 ordered change
if '_ordered' not in state:
            # >= 0.15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
# 0.21.0 CategoricalDtype change
if '_dtype' not in state:
state['_dtype'] = CategoricalDtype(state['_categories'],
state['_ordered'])
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self.dtype.categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep)
@Substitution(klass='Categorical')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = _get_codes_for_values(Series(value).values,
self.categories)
if -1 in values_as_codes:
raise ValueError("Value(s) to be inserted must be in categories.")
return self.codes.searchsorted(values_as_codes, side=side,
sorter=sorter)
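    # A minimal sketch of ``searchsorted`` (illustrative only; the exact
    # return shape for scalar input may vary between versions). Unordered
    # categoricals raise a ValueError, per the check above.
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
    #   >>> cat.searchsorted('b')
    #   array([1])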
def isna(self):
"""
Detect missing values
Missing values (-1 in .codes) are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
isna : top-level isna
isnull : alias of isna
Categorical.notna : boolean inverse of Categorical.isna
"""
ret = self._codes == -1
return ret
isnull = isna
def notna(self):
"""
Inverse of isna
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
notna : top-level notna
notnull : alias of notna
Categorical.isna : boolean inverse of Categorical.notna
"""
return ~self.isna()
notnull = notna
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Missing values (-1 in .codes) are detected.
Returns
-------
valid : Categorical
"""
result = self[self.notna()]
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
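    # A minimal sketch of ``value_counts`` (illustrative only): note the
    # zero-count entry for the unused category.
    #
    #   >>> cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'])
    #   >>> cat.value_counts()
    #   a    2
    #   b    1
    #   c    0
    #   dtype: int64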
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
        # if we are a datetime or period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def _values_for_argsort(self):
return self._codes.copy()
def argsort(self, *args, **kwargs):
# TODO(PY2): use correct signature
        # We have to do *args, **kwargs to avoid a py2-only signature
# issue since np.argsort differs from argsort.
"""Return the indices that would sort the Categorical.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
Notes
-----
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Examples
--------
>>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()
array([2, 0, 1, 3])
>>> cat = pd.Categorical(['b', 'b', 'a', 'c'],
... categories=['c', 'b', 'a'],
... ordered=True)
>>> cat.argsort()
array([3, 0, 1, 2])
"""
# Keep the implementation here just for the docstring.
return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Series.sort_values
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return self._constructor(values=codes, dtype=self.dtype,
fastpath=True)
def _values_for_rank(self):
"""
For correctly ranking ordered categorical data. See GH#15420
Ordered categorical data should be ranked on the basis of
codes with -1 translated to NaN.
Returns
-------
numpy array
"""
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series
If a scalar value is passed it is used to fill all missing values.
Alternatively, a Series or dict can be used to fill in different
values for each index. The value should not be a list. The
value(s) passed should either be in the categories or should be
NaN.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True)
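    # A minimal sketch of ``fillna`` (illustrative only; assumes ``import
    # numpy as np``): the fill value must already be a category.
    #
    #   >>> cat = pd.Categorical(['a', np.nan, 'b'])
    #   >>> cat.fillna('a')
    #   [a, a, b]
    #   Categories (2, object): [a, b]
    #   >>> cat.fillna('z')
    #   Traceback (most recent call last):
    #       ...
    #   ValueError: fill value must be in categories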
def take_nd(self, indexer, allow_fill=None, fill_value=None):
"""
Take elements from the Categorical.
Parameters
----------
indexer : sequence of integers
allow_fill : bool, default None.
How to handle negative values in `indexer`.
* False: negative values in `indices` indicate positional indices
from the right. This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate missing values
(the default). These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
.. versionchanged:: 0.23.0
Deprecated the default value of `allow_fill`. The deprecated
default is ``True``. In the future, this will change to
``False``.
Returns
-------
Categorical
This Categorical will have the same categories and ordered as
`self`.
"""
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
if isna(fill_value):
# For categorical, any NA value is considered a user-facing
# NA value. Our storage NA value is -1.
fill_value = -1
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = self._constructor(codes, dtype=self.dtype, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
        # in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values().tolist())
def __contains__(self, key):
"""Returns True if `key` is in this Categorical."""
# if key is a NaN, check if any NaN is in self.
if isna(key):
return self.isna().any()
return contains(self, key, container=self._codes)
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:])
if footer:
result = u('{result}\n{footer}').format(result=result,
footer=self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.io.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories ({length}, {dtype}): ".format(
length=len(self.categories), dtype=dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if console.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
        # replace "< ... <" by "..." to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: {length}\n{info}').format(
length=len(self), info=self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.io.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
msg = self._get_repr(length=False, footer=True).replace("\n", ", ")
result = ('[], {repr_msg}'.format(repr_msg=msg))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return self._constructor(values=self._codes[key],
dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
            If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isna(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
            # in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not com.is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
            # There is a bug in numpy, which does not accept a Series as an
            # indexer
# https://github.com/pandas-dev/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
def _reverse_indexer(self):
"""
Compute the inverse of a categorical, returning
a dict of categories -> indexers.
*This is an internal function*
Returns
-------
dict of categories -> indexers
        Examples
        --------
In [1]: c = pd.Categorical(list('aabca'))
In [2]: c
Out[2]:
[a, a, b, c, a]
Categories (3, object): [a, b, c]
In [3]: c.categories
Out[3]: Index([u'a', u'b', u'c'], dtype='object')
In [4]: c.codes
Out[4]: array([0, 0, 1, 2, 0], dtype=int8)
In [5]: c._reverse_indexer()
Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
"""
categories = self.categories
r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'),
categories.size)
counts = counts.cumsum()
result = [r[counts[indexer]:counts[indexer + 1]]
for indexer in range(len(counts) - 1)]
result = dict(zip(categories, result))
return result
# reduction ops #
def _reduce(self, name, axis=0, skipna=True, **kwargs):
func = getattr(self, name, None)
if func is None:
msg = 'Categorical cannot perform the operation {op}'
raise TypeError(msg.format(op=name))
return func(**kwargs)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self, dropna=True):
"""
Returns the mode(s) of the Categorical.
Always returns `Categorical` even if only one value.
Parameters
----------
dropna : boolean, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas._libs.hashtable as htable
codes = self._codes
if dropna:
good = self._codes != -1
codes = self._codes[good]
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
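    # A minimal sketch of ``mode`` (illustrative only): NaNs are dropped by
    # default and the result is itself a (sorted) Categorical.
    #
    #   >>> pd.Categorical(['a', 'b', 'b', np.nan, np.nan, np.nan]).mode()
    #   [b]
    #   Categories (2, object): [a, b]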
def unique(self):
"""
        Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
          keep their existing order.
Returns
-------
unique values : ``Categorical``
Examples
--------
An unordered Categorical will return categories in the
order of appearance.
>>> pd.Categorical(list('baabc'))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.Categorical(list('baabc'), categories=list('abc'))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)
[b, a, c]
Categories (3, object): [a < b < c]
See Also
--------
unique
CategoricalIndex.unique
Series.unique
"""
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = np.sort(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def _values_for_factorize(self):
codes = self.codes.astype('int64')
return codes, -1
@classmethod
def _from_factorized(cls, uniques, original):
return original._constructor(original.categories.take(uniques),
categories=original.categories,
ordered=original.ordered)
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
if self.is_dtype_equal(other):
if self.categories.equals(other.categories):
# fastpath to avoid re-coding
other_codes = other._codes
else:
other_codes = _recode_for_categories(other.codes,
other.categories,
self.categories)
return np.array_equal(self._codes, other_codes)
return False
def is_dtype_equal(self, other):
"""
        Returns True if categoricals are the same dtype:
        same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return hash(self.dtype) == hash(other.dtype)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.core.reshape.concat import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
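    # A minimal sketch of ``describe`` (illustrative only):
    #
    #   >>> pd.Categorical(['a', 'a', 'b']).describe()
    #               counts     freqs
    #   categories
    #   a                2  0.666667
    #   b                1  0.333333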
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True
@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import _concat_categorical
return _concat_categorical(to_concat)
def _formatting_values(self):
return self
def isin(self, values):
"""
Check whether `values` are contained in Categorical.
Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
Returns
-------
isin : numpy.ndarray (bool dtype)
Raises
------
TypeError
* If `values` is not a set or list-like
See Also
--------
pandas.Series.isin : equivalent method on Series
Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True, True, True, False, True, False])
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
array([ True, False, True, False, True, False])
"""
from pandas.core.series import _sanitize_array
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a [{values_type}]"
.format(values_type=type(values).__name__))
values = _sanitize_array(values, None, None)
null_mask = np.asarray(isna(values))
code_values = self.categories.get_indexer(values)
code_values = code_values[null_mask | (code_values >= 0)]
return algorithms.isin(self.codes, code_values)
# The Series.cat accessor
@delegate_names(delegate=Categorical,
accessors=["categories", "ordered"],
typ="property")
@delegate_names(delegate=Categorical,
accessors=["rename_categories", "reorder_categories",
"add_categories", "remove_categories",
"remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"],
typ="method")
class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is an inplace operation, while all
    methods return new categorical data by default (but can be called with
`inplace=True`).
Parameters
----------
data : Series or CategoricalIndex
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, data):
self._validate(data)
self._parent = data.values
self.index = data.index
self.name = data.name
self._freeze()
@staticmethod
def _validate(data):
if not is_categorical_dtype(data.dtype):
raise AttributeError("Can only use .cat accessor with a "
"'category' dtype")
def _delegate_property_get(self, name):
return getattr(self._parent, name)
def _delegate_property_set(self, name, new_values):
return setattr(self._parent, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self._parent.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self._parent, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index, name=self.name)
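    # Editor's note (illustrative): for s = Series(['a', 'b'],
    # dtype='category'), s.cat.rename_categories(['x', 'y']) is routed by the
    # @delegate_names decorators to _delegate_method, which invokes the method
    # on the underlying Categorical and re-wraps the result in a Series
    # carrying the original index and name.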
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if is_dtype_equal(values.dtype, categories.dtype):
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, 'values', values)
categories = getattr(categories, 'values', categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
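# Editor's note (illustrative): for values np.array(['b', 'a', 'z'],
# dtype=object) and categories Index(['a', 'b']), the lookup above returns
# array([1, 0, -1]) -- positions in `categories`, with -1 marking values that
# are not present.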
def _recode_for_categories(codes, old_categories, new_categories):
"""
    Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
is_iterator(list_like)):
return list(list_like)
elif is_scalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories,
categories=values.categories,
ordered=values.ordered)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
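# Editor's note (illustrative): _factorize_from_iterable(['b', 'a', 'b'])
# returns (array([1, 0, 1], dtype=int8), Index(['a', 'b'], dtype='object')),
# since the Categorical constructor sorts the inferred categories.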
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
| 34.122711
| 79
| 0.576176
|
| true
| true
|
79070c2d3b2c86b7e751cc1403b35459fe05349e
| 163986
|
py
|
Python
|
python/ccxt/okex.py
|
pphszx/ccxt
|
5df54f840a4144d7efad5fd02190e2239f325ec9
|
[
"MIT"
] | 1
|
2021-02-10T21:29:07.000Z
|
2021-02-10T21:29:07.000Z
|
python/ccxt/okex.py
|
niki-johnson/ccxt
|
8dd609995c5462a32e505210047d4fa5d41c53c8
|
[
"MIT"
] | null | null | null |
python/ccxt/okex.py
|
niki-johnson/ccxt
|
8dd609995c5462a32e505210047d4fa5d41c53c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class okex(Exchange):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'futures': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
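            # Editor's note (illustrative, not in the original source): these
            # values are the candle granularity in seconds, so a ccxt call
            # such as okex().fetch_ohlcv('BTC/USDT', '1h') would be sent to
            # the candles endpoint with granularity=3600.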
'hostname': 'okex.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
'referral': 'https://www.okex.com/join/1888677',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'wallet',
'sub-account',
'asset-valuation',
'wallet/{currency}',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
'currencies',
'withdrawal/fee',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'trade_fee',
'fills',
'algo',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'accounts/{instrument_id}/leverage',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
# public
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
'accounts/{instrument_id}/leverage',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
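            # Editor's note (hedged): ccxt builds implicit request methods
            # from this map at runtime, e.g. the 'spot' GET entry
            # 'instruments' becomes self.spotGetInstruments(params) (also
            # reachable as spot_get_instruments in Python), so the lists above
            # describe the class's entire REST surface.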
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
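            # Editor's note (illustrative): with the spot schedule above, a
            # taker order for 1 BTC at 10,000 USDT would be charged about
            # 1 * 10000 * 0.0015 = 15 USDT when fees are computed from these
            # rates.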
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
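            # Editor's note (illustrative): private endpoints need all three
            # values, e.g. okex({'apiKey': '...', 'secret': '...',
            # 'password': '...'}), where 'password' is the API passphrase
            # chosen when the key was created.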
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 429 Client Error: Too Many Requests for url
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again.': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
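                    # Editor's note (hedged): when a response carries an error
                    # code, ccxt looks it up in this 'exact' map and raises
                    # the mapped exception class with the exchange's message
                    # attached; unmapped codes fall back to a generic
                    # ExchangeError.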
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
                    '30016': ExchangeError,  # {"code": 30016, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
                    '30035': ExchangeError,  # {"code": 30035, "message": "trading is not supported in this website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
                    '32022': ExchangeError,  # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
                    '32023': ExchangeError,  # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
                    '32040': ExchangeError,  # User has open contract orders or positions
                    '32044': ExchangeError,  # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
                    '32050': InvalidOrder,  # Order strategy range error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
                    '32054': ExchangeError,  # Trade not allowed
                    '32055': InvalidOrder,  # cancel order error
                    '32056': ExchangeError,  # iceberg per order average should be between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time interval of orders should be set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in this setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for this position will fall short of the required margin in this setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support this action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of this asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for this asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during this period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for this token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of this leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
'33085': InvalidOrder, # The value of the position and buying order has reached the position limit, and no further buying is allowed.
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': RateLimitExceeded, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': ExchangeError, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancellation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
'35102': RateLimitExceeded, # {"error_message":"The operation that close all at market price is too frequent","result":"true","error_code":"35102","order_id":"-1"}
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for order cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap', 'option'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
def fetch_time(self, params={}):
response = self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
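# a minimal usage sketch (not part of the library), assuming the public ccxt
# package API; the constructor call below is a placeholder:
#
#     import ccxt
#     exchange = ccxt.okex()
#     print(exchange.fetch_time())  # server time in milliseconds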
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
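# usage sketch, assuming a ccxt.okex instance configured elsewhere: markets
# for every type listed in options['fetchMarkets'] are merged into one list,
# and callers normally go through load_markets() instead of fetch_markets():
#
#     markets = exchange.load_markets()
#     print(markets['BTC/USDT']['type'], markets['BTC/USDT']['precision'])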
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
#
# futures markets
#
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
#         alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
#
# swap markets
#
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
#
# options markets
#
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# }
#
id = self.safe_string(market, 'instrument_id')
marketType = 'spot'
spot = True
future = False
swap = False
option = False
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
contractVal = self.safe_float(market, 'contract_val')
if contractVal is not None:
if 'option_type' in market:
marketType = 'option'
spot = False
option = True
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
else:
marketType = 'swap'
spot = False
swap = True
futuresAlias = self.safe_string(market, 'alias')
if futuresAlias is not None:
swap = False
future = True
marketType = 'futures'
baseId = self.safe_string(market, 'underlying_index')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote) if spot else id
lotSize = self.safe_float_2(market, 'lot_size', 'trade_increment')
precision = {
'amount': self.safe_float(market, 'size_increment', lotSize),
'price': self.safe_float(market, 'tick_size'),
}
minAmount = self.safe_float_2(market, 'min_size', 'base_min_size')
active = True
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'futures': future,
'swap': swap,
'option': option,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': precision['price'],
'max': None,
},
},
})
def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
#             alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 0.00000001 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = True if (canDeposit and canWithdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
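# usage sketch, assuming the public ccxt API: as the comment above notes, the
# endpoint replies with an authentication error without credentials, so the
# key values (placeholders here) must be set:
#
#     exchange = ccxt.okex({'apiKey': 'KEY', 'secret': 'SECRET', 'password': 'PASSPHRASE'})
#     currencies = exchange.fetch_currencies()
#     print(currencies['BTC']['limits']['withdraw']['min'])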
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = getattr(self, method)(self.extend(request, params))
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
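# usage sketch, assuming a configured ccxt.okex instance: the optional limit
# is forwarded as the 'size' request parameter, capped by the exchange at 200:
#
#     orderbook = exchange.fetch_order_book('ETH/BTC', 200)
#     print(orderbook['bids'][0], orderbook['asks'][0])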
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': self.safe_float(ticker, 'best_bid_size'),
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': self.safe_float(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
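# usage sketch, assuming a configured ccxt.okex instance; the symbol below is
# a placeholder:
#
#     ticker = exchange.fetch_ticker('ETH/BTC')
#     print(ticker['last'], ticker['baseVolume'])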
def fetch_tickers_by_type(self, type, symbols=None, params={}):
self.load_markets()
method = type + 'GetInstrumentsTicker'
response = getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
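# usage sketch, assuming a configured ccxt.okex instance: the market type can
# be overridden per call through the params argument:
#
#     tickers = exchange.fetch_tickers(None, {'type': 'swap'})
#     print(list(tickers.keys()))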
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'qty')
amount = self.safe_float(trade, 'order_qty', amount)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
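# usage sketch, assuming a configured ccxt.okex instance: limit is clamped to
# the exchange maximum of 100 above, so larger values are safe to pass:
#
#     trades = exchange.fetch_trades('BTC/USDT', limit=60)
#     print(trades[0]['price'], trades[0]['side'])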
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_float(ohlcv, 1), # Open
self.safe_float(ohlcv, 2), # High
self.safe_float(ohlcv, 3), # Low
self.safe_float(ohlcv, 4), # Close
# self.safe_float(ohlcv, 5), # Quote Volume
# self.safe_float(ohlcv, 6), # Base Volume
self.safe_float(ohlcv, volumeIndex), # Volume, okex will return base volume in the 7th element for future markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_float(ohlcv, 'open'), # Open
self.safe_float(ohlcv, 'high'), # High
self.safe_float(ohlcv, 'low'), # Low
self.safe_float(ohlcv, 'close'), # Close
self.safe_float(ohlcv, 'volume'), # Base Volume
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300 # default
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0"
# },
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
# ]
#
# futures
#
# [
# [
# 1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868
# ],
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331
# ]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
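# usage sketch, assuming a configured ccxt.okex instance: the 'Candles'
# endpoint is the default, and 'HistoryCandles' can be selected through the
# options, as handled above:
#
#     exchange.options['fetchOHLCV']['type'] = 'HistoryCandles'
#     ohlcvs = exchange.fetch_ohlcv('BTC/USDT', '1m', limit=100)
#     print(ohlcvs[0])  # [timestamp, open, high, low, close, volume]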
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'hold')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def parse_margin_balance(self, response):
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(marketBalance, 'balance')
account['used'] = self.safe_float(marketBalance, 'hold')
account['free'] = self.safe_float(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.parse_balance(accounts)
return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_float(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for i in range(0, len(contracts)):
contract = contracts[i]
fixedBalance = self.safe_float(contract, 'fixed_balance')
realizedPnl = self.safe_float(contract, 'realized_pnl')
marginFrozen = self.safe_float(contract, 'margin_frozen')
marginForUnfilled = self.safe_float(contract, 'margin_for_unfilled')
margin = self.sum(fixedBalance, realizedPnl) - marginFrozen - marginForUnfilled
free = self.sum(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_float(balance, 'realized_pnl')
unrealizedPnl = self.safe_float(balance, 'unrealized_pnl')
marginFrozen = self.safe_float(balance, 'margin_frozen')
marginForUnfilled = self.safe_float(balance, 'margin_for_unfilled')
account['free'] = self.sum(totalAvailBalance, realizedPnl, unrealizedPnl) - marginFrozen - marginForUnfilled
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
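# usage sketch, assuming a configured ccxt.okex instance with credentials:
# the account type falls back to options['defaultType'] and can be set per
# call:
#
#     balance = exchange.fetch_balance({'type': 'spot'})
#     print(balance['total'])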
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
# 'order_type': '0', # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediate Or Cancel, 4 = Market for futures only
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
orderType = self.safe_string(params, 'order_type')
# order_type == '4' means a market order
isMarketOrder = (type == 'market') or (orderType == '4')
if isMarketOrder:
request['order_type'] = '4'
else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1') # 1 = spot, 2 = margin
request = self.extend(request, {
'side': side,
'type': type, # limit/market
'margin_trading': marginTrading, # 1 = spot, 2 = margin
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_float(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
else:
notional = amount if (notional is None) else notional
precision = market['precision']['price']
request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
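# usage sketch, assuming a configured ccxt.okex instance with credentials;
# all values are placeholders: a spot limit order, then a market buy where
# the price argument is only used to derive the quote-currency cost, as
# coded above:
#
#     order = exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 4000)
#     order = exchange.create_order('BTC/USDT', 'market', 'buy', 0.001, 4000)
#     print(order['id'], order['status'])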
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot, margin
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
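# usage sketch, assuming a configured ccxt.okex instance with credentials;
# the ids are placeholders: cancellation by exchange order id, or by client
# order id through params:
#
#     exchange.cancel_order('2510832677225473', 'BTC/USDT')
#     exchange.cancel_order(None, 'BTC/USDT', {'client_oid': 'a123'})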
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
#         "funds":"", # this is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
#         "pnl":"1.09510794", # missing in swap, spot and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_float(order, 'size')
filled = self.safe_float_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_float_2(order, 'filled_notional', 'funds')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_float(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None # fix empty clientOrderId string
stopPrice = self.safe_float(order, 'trigger_price')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
# # in fact, this documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
# in fact, this documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('6', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addresses = self.parse_deposit_addresses(response)
address = self.safe_value(addresses, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4', # 2 = OKCoin International, 3 = OKEx, 4 = others
'amount': self.number_to_string(amount),
'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
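# Hedged usage sketch for withdraw(), with a placeholder address and funds
# password(not real values); the 'fee' string is mandatory as enforced above:
#
# exchange.withdraw('BTC', 0.1, '1PlaceholderBtcAddressXXXXXXXXXXXX', None, {
# 'fee': '0.0005', # network fee, '0' for internal OKCoin/OKEx transfers
# 'password': 'funds-password', # forwarded as trade_pwd
# })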
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "tag": "1234567",
# "deposit_id": 11571659, <-- we can use self
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
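# Worked example of the fee-string stripping above, for a withdrawal row
# with currency "ETH" and fee "0.01000000eth"(see the fetchWithdrawals sample):
#
# feeWithCurrencyId = '0.01000000eth'
# feeWithoutCurrencyId = feeWithCurrencyId.replace('eth', '') # '0.01000000'
# feeCost = float(feeWithoutCurrencyId) # 0.01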
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_my_trade(self, pair, market=None):
# check that trading symbols match in both entries
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amount = None
cost = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amount = self.safe_float(otherTrade, 'size')
cost = self.safe_float(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amount = self.safe_float(userTrade, 'size')
cost = self.safe_float(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
price = self.safe_float(userTrade, 'price')
feeCostFirst = self.safe_float(otherTrade, 'fee')
feeCostSecond = self.safe_float(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
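# For example, an exchange-reported fee of "-0.04647925" USDT(a deduction)
# becomes a positive unified fee cost, while a positive rebate would become
# a negative cost:
#
# fee = {'cost': 0.04647925, 'currency': 'USDT'} # from reported '-0.04647925'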
if (feeCostFirst is not None) and (feeCostFirst != 0):
if (feeCostSecond is not None) and (feeCostSecond != 0):
fees = [
{
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
},
{
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecond is not None) and (feeCostSecond != 0):
fee = {
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': 0,
'currency': self.safe_currency_code(feeCurrencyId),
}
#
# simplified structures to show the underlying semantics
#
# # market/limit sell
#
# {
# "currency":"USDT",
# "fee":"-0.04647925", # ←--- fee in received quote currency
# "price":"129.13", # ←------ price
# "size":"30.98616393", # ←-- cost
# },
# {
# "currency":"ETH",
# "fee":"0",
# "price":"129.13",
# "size":"0.23996099", # ←--- amount
# },
#
# # market/limit buy
#
# {
# "currency":"ETH",
# "fee":"-0.00036049", # ←--- fee in received base currency
# "price":"129.16", # ←------ price
# "size":"0.240322", # ←----- amount
# },
# {
# "currency":"USDT",
# "fee":"0",
# "price":"129.16",
# "size":"31.03998952", # ←-- cost
# }
#
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
result = {
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
if fees is not None:
result['fees'] = fees
return result
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
symbol = None
if market is not None:
symbol = market['symbol']
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
# this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
# until either OKEX fixes the API or we work around this on our side somehow
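# Illustrative pairing, abbreviated from the response sample further down:
# both rows share trade_id "18551601" and together describe one fill, one
# leg in the quote currency(cost) and one in the base currency(amount):
#
# {"currency": "USDT", "size": "30.98616393", "trade_id": "18551601"} # cost leg
# {"currency": "ETH", "size": "0.23996099", "trade_id": "18551601"} # amount leg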
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
# 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
# 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
type = market['type']
if (type == 'futures') or (type == 'swap'):
method = type + 'GetInstrumentIdPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = getattr(self, method)(self.extend(request, params))
#
# futures
#
# crossed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "2",
# "long_avail_qty": "2",
# "long_avg_cost": "8260",
# "long_settlement_price": "8260",
# "realised_pnl": "0.00020928",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_avg_cost": "8259.99",
# "short_settlement_price": "8259.99",
# "liquidation_price": "113.81",
# "instrument_id": "BTC-USD-191227",
# "leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T14:02:51.029Z",
# "margin_mode": "crossed",
# "short_margin": "0.00242197",
# "short_pnl": "6.63E-6",
# "short_pnl_ratio": "0.002477997",
# "short_unrealised_pnl": "6.63E-6",
# "long_margin": "0.00242197",
# "long_pnl": "-6.65E-6",
# "long_pnl_ratio": "-0.002478",
# "long_unrealised_pnl": "-6.65E-6",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8257.57"
# }
# ],
# "margin_mode": "crossed"
# }
#
# fixed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "4",
# "long_avail_qty": "4",
# "long_margin": "0.00323844",
# "long_liqui_price": "7762.09",
# "long_pnl_ratio": "0.06052306",
# "long_avg_cost": "8234.43",
# "long_settlement_price": "8234.43",
# "realised_pnl": "-0.00000296",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_margin": "0.00241105",
# "short_liqui_price": "9166.74",
# "short_pnl_ratio": "0.03318052",
# "short_avg_cost": "8295.13",
# "short_settlement_price": "8295.13",
# "instrument_id": "BTC-USD-191227",
# "long_leverage": "15",
# "short_leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T13:12:09.438Z",
# "margin_mode": "fixed",
# "short_margin_ratio": "0.10292507",
# "short_maint_margin_ratio": "0.005",
# "short_pnl": "7.853E-5",
# "short_unrealised_pnl": "7.853E-5",
# "long_margin_ratio": "0.07103743",
# "long_maint_margin_ratio": "0.005",
# "long_pnl": "1.9841E-4",
# "long_unrealised_pnl": "1.9841E-4",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8266.99"
# }
# ],
# "margin_mode": "fixed"
# }
#
# swap
#
# crossed margin mode
#
# {
# "margin_mode": "crossed",
# "timestamp": "2019-09-27T03:49:02.018Z",
# "holding": [
# {
# "avail_position": "3",
# "avg_cost": "59.49",
# "instrument_id": "LTC-USD-SWAP",
# "last": "55.98",
# "leverage": "10.00",
# "liquidation_price": "4.37",
# "maint_margin_ratio": "0.0100",
# "margin": "0.0536",
# "position": "3",
# "realized_pnl": "0.0000",
# "unrealized_pnl": "0",
# "settled_pnl": "-0.0330",
# "settlement_price": "55.84",
# "side": "long",
# "timestamp": "2019-09-27T03:49:02.018Z"
# },
# ]
# }
#
# fixed margin mode
#
# {
# "margin_mode": "fixed",
# "timestamp": "2019-09-27T03:47:37.230Z",
# "holding": [
# {
# "avail_position": "20",
# "avg_cost": "8025.0",
# "instrument_id": "BTC-USD-SWAP",
# "last": "8113.1",
# "leverage": "15.00",
# "liquidation_price": "7002.6",
# "maint_margin_ratio": "0.0050",
# "margin": "0.0454",
# "position": "20",
# "realized_pnl": "-0.0001",
# "unrealized_pnl": "0",
# "settled_pnl": "0.0076",
# "settlement_price": "8279.2",
# "side": "long",
# "timestamp": "2019-09-27T03:47:37.230Z"
# }
# ]
# }
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = getattr(self, method)(params)
#
# futures
#
# ...
#
#
# swap
#
# ...
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if (type == 'spot') or (type == 'futures'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
# request['type'] = 'number' # All types will be returned if this field is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# margin
#
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_float(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_params(self.urls['api']['rest'], {'hostname': self.hostname}) + request
type = self.get_path_authentication_type(path)
if type == 'public':
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
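# Minimal standalone sketch(not part of the class) of the signing scheme
# above, with placeholder credentials; the prehash is timestamp + method +
# request path(plus the JSON body for non-GET requests), HMAC-SHA256'd with
# the secret and base64-encoded:
#
# import base64, hashlib, hmac
# secret = b'YOUR_SECRET' # placeholder
# prehash = '2019-03-20T10:04:55.000Z' + 'GET' + '/api/spot/v3/accounts'
# signature = base64.b64encode(hmac.new(secret, prehash.encode(), hashlib.sha256).digest())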
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
# a special case to handle the optionGetUnderlying endpoint interfering with
# other endpoints containing this keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
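# Illustrative results under the default options['auth'] mapping below,
# assuming find_broadly_matched_key does substring matching on the keys:
#
# get_path_authentication_type('underlying') # 'public', special case above
# get_path_authentication_type('instruments/{instrument_id}/book') # 'public' via the broad 'instruments' key
# get_path_authentication_type('orders') # 'private' by default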
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
feedback = self.id + ' ' + body
if code == 503:
# {"message":"name resolution failed"}
raise ExchangeNotAvailable(feedback)
#
# {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
#
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
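# Walking through the sample error above:
#
# response = {"error_message": "Order does not exist", "result": "true", "error_code": "35029", "order_id": "-1"}
# errorCode = '35029' # nonZeroErrorCode is True('message' is absent here)
# # self.exceptions['exact']['35029'] maps to OrderNotFound(see the table below),
# # so throw_exactly_matched_exception raises OrderNotFound(feedback)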
from ccxt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
class okex(Exchange):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
'rateLimit': 1000,
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'futures': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okex.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
'referral': 'https://www.okex.com/join/1888677',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'wallet',
'sub-account',
'asset-valuation',
'wallet/{currency}',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
'currencies',
'withdrawal/fee',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'trade_fee',
'fills',
'algo',
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'accounts/{instrument_id}/leverage',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
'accounts/{instrument_id}/leverage',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
'exact': {
'1': ExchangeError,
'failure to get a peer from the ring-balancer': ExchangeNotAvailable,
'Server is busy, please try again.': ExchangeNotAvailable,
'An unexpected error occurred': ExchangeError,
'System error': ExchangeError,
'4010': PermissionDenied,
'4001': ExchangeError,
'4002': ExchangeError,
'30001': AuthenticationError,
'30002': AuthenticationError,
'30003': AuthenticationError,
'30004': AuthenticationError,
'30005': InvalidNonce,
'30006': AuthenticationError,
'30007': BadRequest,
'30008': RequestTimeout,
'30009': ExchangeError,
'30010': AuthenticationError,
'30011': PermissionDenied,
'30012': AuthenticationError,
'30013': AuthenticationError,
'30014': DDoSProtection,
'30015': AuthenticationError,
'30016': ExchangeError,
'30017': ExchangeError,
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable,
'30020': BadRequest,
'30021': BadRequest,
'30022': PermissionDenied,
'30023': BadRequest,
'30024': BadSymbol,
'30025': BadRequest,
'30026': DDoSProtection,
'30027': AuthenticationError,
'30028': PermissionDenied,
'30029': AccountSuspended,
'30030': ExchangeNotAvailable,
'30031': BadRequest,
'30032': BadSymbol,
'30033': BadRequest,
'30034': ExchangeError,
'30035': ExchangeError,
'30036': ExchangeError,
'30037': ExchangeNotAvailable,
'30044': RequestTimeout,
'32001': AccountSuspended,
'32002': PermissionDenied,
'32003': CancelPending,
'32004': ExchangeError,
'32005': InvalidOrder,
'32006': InvalidOrder,
'32007': InvalidOrder,
'32008': InvalidOrder,
'32009': InvalidOrder,
'32010': ExchangeError,
'32011': ExchangeError,
'32012': ExchangeError,
'32013': ExchangeError,
'32014': ExchangeError,
'32015': ExchangeError,
'32016': ExchangeError,
'32017': ExchangeError,
'32018': ExchangeError,
'32019': ExchangeError,
'32020': ExchangeError,
'32021': ExchangeError,
'32022': ExchangeError,
'32023': ExchangeError,
'32024': ExchangeError,
'32025': ExchangeError,
'32026': ExchangeError,
'32027': ExchangeError,
'32028': ExchangeError,
'32029': ExchangeError,
'32030': InvalidOrder,
'32031': ArgumentsRequired,
'32038': AuthenticationError,
'32040': ExchangeError,
'32044': ExchangeError,
'32045': ExchangeError,
'32046': ExchangeError,
'32047': ExchangeError,
'32048': InvalidOrder,
'32049': ExchangeError,
'32050': InvalidOrder,
'32051': InvalidOrder,
'32052': ExchangeError,
'32053': ExchangeError,
'32057': ExchangeError,
'32054': ExchangeError,
'32055': InvalidOrder,
'32056': ExchangeError,
'32058': ExchangeError,
'32059': InvalidOrder,
'32060': InvalidOrder,
'32061': InvalidOrder,
'32062': InvalidOrder,
'32063': InvalidOrder,
'32064': ExchangeError,
'32065': ExchangeError,
'32066': ExchangeError,
'32067': ExchangeError,
'32068': ExchangeError,
'32069': ExchangeError,
'32070': ExchangeError,
'32071': ExchangeError,
'32072': ExchangeError,
'32073': ExchangeError,
'32074': ExchangeError,
'32075': ExchangeError,
'32076': ExchangeError,
'32077': ExchangeError,
'32078': ExchangeError,
'32079': ExchangeError,
'32080': ExchangeError,
'32083': ExchangeError,
'33001': PermissionDenied,
'33002': AccountSuspended,
'33003': InsufficientFunds,
'33004': ExchangeError,
'33005': ExchangeError,
'33006': ExchangeError,
'33007': ExchangeError,
'33008': InsufficientFunds,
'33009': ExchangeError,
'33010': ExchangeError,
'33011': ExchangeError,
'33012': ExchangeError,
'33013': InvalidOrder,
'33014': OrderNotFound,
'33015': InvalidOrder,
'33016': ExchangeError,
'33017': InsufficientFunds,
'33018': ExchangeError,
'33020': ExchangeError,
'33021': BadRequest,
'33022': InvalidOrder,
'33023': ExchangeError,
'33024': InvalidOrder,
'33025': InvalidOrder,
'33026': ExchangeError,
'33027': InvalidOrder,
'33028': InvalidOrder,
'33029': InvalidOrder,
'33034': ExchangeError,
'33035': ExchangeError,
'33036': ExchangeError,
'33037': ExchangeError,
'33038': ExchangeError,
'33039': ExchangeError,
'33040': ExchangeError,
'33041': ExchangeError,
'33042': ExchangeError,
'33043': ExchangeError,
'33044': ExchangeError,
'33045': ExchangeError,
'33046': ExchangeError,
'33047': ExchangeError,
'33048': ExchangeError,
'33049': ExchangeError,
'33050': ExchangeError,
'33051': ExchangeError,
'33059': BadRequest,
'33060': BadRequest,
'33061': ExchangeError,
'33062': ExchangeError,
'33063': ExchangeError,
'33064': ExchangeError,
'33065': ExchangeError,
'33085': InvalidOrder,
'21009': ExchangeError,
'34001': PermissionDenied,
'34002': InvalidAddress,
'34003': ExchangeError,
'34004': ExchangeError,
'34005': ExchangeError,
'34006': ExchangeError,
'34007': ExchangeError,
'34008': InsufficientFunds,
'34009': ExchangeError,
'34010': ExchangeError,
'34011': ExchangeError,
'34012': ExchangeError,
'34013': ExchangeError,
'34014': ExchangeError,
'34015': ExchangeError,
'34016': PermissionDenied,
'34017': AccountSuspended,
'34018': AuthenticationError,
'34019': PermissionDenied,
'34020': PermissionDenied,
'34021': InvalidAddress,
'34022': ExchangeError,
'34023': PermissionDenied,
'34026': RateLimitExceeded,
'34036': ExchangeError,
'34037': ExchangeError,
'34038': ExchangeError,
'34039': ExchangeError,
'35001': ExchangeError,
'35002': ExchangeError,
'35003': ExchangeError,
'35004': ExchangeError,
'35005': AuthenticationError,
'35008': InvalidOrder,
'35010': InvalidOrder,
'35012': InvalidOrder,
'35014': InvalidOrder,
'35015': InvalidOrder,
'35017': ExchangeError,
'35019': InvalidOrder,
'35020': InvalidOrder,
'35021': InvalidOrder,
'35022': BadRequest,
'35024': BadRequest,
'35025': InsufficientFunds,
'35026': BadRequest,
'35029': OrderNotFound,
'35030': InvalidOrder,
'35031': InvalidOrder,
'35032': ExchangeError,
'35037': ExchangeError,
'35039': ExchangeError,
'35040': InvalidOrder,
'35044': ExchangeError,
'35046': InsufficientFunds,
'35047': InsufficientFunds,
'35048': ExchangeError,
'35049': InvalidOrder,
'35050': InvalidOrder,
'35052': InsufficientFunds,
'35053': ExchangeError,
'35055': InsufficientFunds,
'35057': ExchangeError,
'35058': ExchangeError,
'35059': BadRequest,
'35060': BadRequest,
'35061': BadRequest,
'35062': InvalidOrder,
'35063': InvalidOrder,
'35064': InvalidOrder,
'35066': InvalidOrder,
'35067': InvalidOrder,
'35068': InvalidOrder,
'35069': InvalidOrder,
'35070': InvalidOrder,
'35071': InvalidOrder,
'35072': InvalidOrder,
'35073': InvalidOrder,
'35074': InvalidOrder,
'35075': InvalidOrder,
'35076': InvalidOrder,
'35077': InvalidOrder,
'35078': InvalidOrder,
'35079': InvalidOrder,
'35080': InvalidOrder,
'35081': InvalidOrder,
'35082': InvalidOrder,
'35083': InvalidOrder,
'35084': InvalidOrder,
'35085': InvalidOrder,
'35086': InvalidOrder,
'35087': InvalidOrder,
'35088': InvalidOrder,
'35089': InvalidOrder,
'35090': ExchangeError,
'35091': ExchangeError,
'35092': ExchangeError,
'35093': ExchangeError,
'35094': ExchangeError,
'35095': BadRequest,
'35096': ExchangeError,
'35097': ExchangeError,
'35098': ExchangeError,
'35099': ExchangeError,
'35102': RateLimitExceeded,
'36001': BadRequest,
'36002': BadRequest,
'36005': ExchangeError,
'36101': AuthenticationError,
'36102': PermissionDenied,
'36103': PermissionDenied,
'36104': PermissionDenied,
'36105': PermissionDenied,
'36106': PermissionDenied,
'36107': PermissionDenied,
'36108': InsufficientFunds,
'36109': PermissionDenied,
'36201': PermissionDenied,
'36202': PermissionDenied,
'36203': InvalidOrder,
'36204': ExchangeError,
'36205': BadRequest,
'36206': BadRequest,
'36207': InvalidOrder,
'36208': InvalidOrder,
'36209': InvalidOrder,
'36210': InvalidOrder,
'36211': InvalidOrder,
'36212': InvalidOrder,
'36213': InvalidOrder,
'36214': ExchangeError,
'36216': OrderNotFound,
'36217': InvalidOrder,
'36218': InvalidOrder,
'36219': InvalidOrder,
'36220': InvalidOrder,
'36221': InvalidOrder,
'36222': InvalidOrder,
'36223': InvalidOrder,
'36224': InvalidOrder,
'36225': InvalidOrder,
'36226': InvalidOrder,
'36227': InvalidOrder,
'36228': InvalidOrder,
'36229': InvalidOrder,
'36230': InvalidOrder,
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles',
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap', 'option'],
'defaultType': 'spot',
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
},
'commonCurrencies': {
'AE': 'AET',
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'YOYO': 'YOYOW',
'WIN': 'WinToken',
},
})
def fetch_time(self, params={}):
response = self.generalGetTime(params)
return self.parse8601(self.safe_string(response, 'iso'))
def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
id = self.safe_string(market, 'instrument_id')
marketType = 'spot'
spot = True
future = False
swap = False
option = False
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
contractVal = self.safe_float(market, 'contract_val')
if contractVal is not None:
if 'option_type' in market:
marketType = 'option'
spot = False
option = True
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
else:
marketType = 'swap'
spot = False
swap = True
futuresAlias = self.safe_string(market, 'alias')
if futuresAlias is not None:
swap = False
future = True
marketType = 'futures'
baseId = self.safe_string(market, 'underlying_index')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote) if spot else id
lotSize = self.safe_float_2(market, 'lot_size', 'trade_increment')
precision = {
'amount': self.safe_float(market, 'size_increment', lotSize),
'price': self.safe_float(market, 'tick_size'),
}
minAmount = self.safe_float_2(market, 'min_size', 'base_min_size')
active = True
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'futures': future,
'swap': swap,
'option': option,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': precision['price'],
'max': None,
},
},
})
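# Illustrative, abbreviated instrument payloads and how parse_market() above
# classifies them(values are placeholders, not live data):
#
# {'instrument_id': 'BTC-USDT', 'base_currency': 'BTC', 'quote_currency': 'USDT'} # no contract_val -> spot
# {'instrument_id': 'BTC-USD-SWAP', 'contract_val': '100'} # contract_val, no alias -> swap
# {'instrument_id': 'BTC-USD-190628', 'contract_val': '100', 'alias': 'quarter'} # alias -> futures
# {'instrument_id': 'BTC-USD-190927-12500-C', 'contract_val': '0.1', 'option_type': 'C', 'underlying': 'BTC-USD'} # option_type -> option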
def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = getattr(self, method)(params)
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
def fetch_currencies(self, params={}):
response = self.accountGetCurrencies(params)
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 0.00000001
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = True if (canDeposit and canWithdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'price': {'min': None, 'max': None},
'cost': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_float(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit
response = getattr(self, method)(self.extend(request, params))
timestamp = self.parse8601(self.safe_string(response, 'timestamp'))
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_24h'),
'low': self.safe_float(ticker, 'low_24h'),
'bid': self.safe_float(ticker, 'best_bid'),
'bidVolume': self.safe_float(ticker, 'best_bid_size'),
'ask': self.safe_float(ticker, 'best_ask'),
'askVolume': self.safe_float(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_float(ticker, 'quote_volume_24h'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
return self.parse_ticker(response)
def fetch_tickers_by_type(self, type, symbols=None, params={}):
self.load_markets()
method = type + 'GetInstrumentsTicker'
response = getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
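        # the trade carries its market id in 'instrument_id'; resolve it to a
        # unified symbol, falling back to splitting the id on '-' when the
        # market is not cached in self.markets_by_id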
        symbol = None
        marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'qty')
amount = self.safe_float(trade, 'order_qty', amount)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
'limit': limit,
}
response = getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
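        # candles arrive either as arrays [timestamp, open, high, low, close, volume(, currency_volume)],
        # where the 7-element variant keeps the volume at index 6, or as dicts with named fields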
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp,
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, volumeIndex),
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
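        # 'Candles' paginates forward from 'start', while 'HistoryCandles' walks
        # backwards in time, so the start/end bounds are mirrored between the branches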
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_account_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'hold')
account['free'] = self.safe_float(balance, 'available')
result[code] = account
return self.parse_balance(result)
def parse_margin_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(marketBalance, 'balance')
account['used'] = self.safe_float(marketBalance, 'hold')
account['free'] = self.safe_float(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.parse_balance(accounts)
return result
def parse_futures_balance(self, response):
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_float(balance, 'total_avail_balance')
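            # fixed(isolated) margin: the free balance is summed per contract as
            # fixed_balance + realized_pnl - margin_frozen - margin_for_unfilled;
            # crossed margin uses a single account-level formula below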
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for i in range(0, len(contracts)):
contract = contracts[i]
fixedBalance = self.safe_float(contract, 'fixed_balance')
realizedPnl = self.safe_float(contract, 'realized_pnl')
marginFrozen = self.safe_float(contract, 'margin_frozen')
marginForUnfilled = self.safe_float(contract, 'margin_for_unfilled')
margin = self.sum(fixedBalance, realizedPnl) - marginFrozen - marginForUnfilled
free = self.sum(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_float(balance, 'realized_pnl')
unrealizedPnl = self.safe_float(balance, 'unrealized_pnl')
marginFrozen = self.safe_float(balance, 'margin_frozen')
marginForUnfilled = self.safe_float(balance, 'margin_for_unfilled')
account['free'] = self.sum(totalAvailBalance, realizedPnl, unrealizedPnl) - marginFrozen - marginForUnfilled
account['total'] = self.safe_float(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_swap_balance(self, response):
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
account['total'] = self.safe_float(balance, 'equity')
account['free'] = self.safe_float(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
def fetch_balance(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = getattr(self, method)(query)
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
        }
        clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
        if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type,
                'size': size,
            })
            orderType = self.safe_string(params, 'order_type')
            # order_type == '4' means a market order
            isMarketOrder = (type == 'market') or (orderType == '4')
            if isMarketOrder:
                request['order_type'] = '4'
            else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1')
request = self.extend(request, {
'side': side,
'type': type,
'margin_trading': marginTrading,
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
if side == 'buy':
notional = self.safe_float(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
else:
notional = amount if (notional is None) else notional
precision = market['precision']['price']
request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = getattr(self, method)(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
return self.parse_order(result, market)
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy',
'2': 'sell',
'3': 'sell',
'4': 'buy',
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
        id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_float(order, 'size')
filled = self.safe_float_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_float_2(order, 'filled_notional', 'funds')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_float(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
stopPrice = self.safe_float(order, 'trigger_price')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
        }
        clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
        return self.parse_order(response)
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = getattr(self, method)(self.extend(request, query))
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
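        # '6' is the exchange's composite state for incomplete orders(open or partially filled)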
return self.fetch_orders_by_state('6', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
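        # '7' is the composite state for complete orders(canceled or fully filled)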
return self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def parse_deposit_address(self, depositAddress, currency=None):
        address = self.safe_string(depositAddress, 'address')
        tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
        currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.accountGetDepositAddress(self.extend(request, params))
addresses = self.parse_deposit_addresses(response)
address = self.safe_value(addresses, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4',
'amount': self.number_to_string(amount),
'fee': fee,
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = self.accountPostWithdrawal(self.extend(request, query))
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_my_trade(self, pair, market=None):
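        # each fill arrives as a pair of ledger entries with opposite sides:
        # pair[1] is the user's entry and pair[0] the counterpart, and both
        # must reference the same instrument_id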
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amount = None
cost = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amount = self.safe_float(otherTrade, 'size')
cost = self.safe_float(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amount = self.safe_float(userTrade, 'size')
cost = self.safe_float(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
price = self.safe_float(userTrade, 'price')
feeCostFirst = self.safe_float(otherTrade, 'fee')
feeCostSecond = self.safe_float(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
if (feeCostFirst is not None) and (feeCostFirst != 0):
if (feeCostSecond is not None) and (feeCostSecond != 0):
fees = [
{
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
},
{
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecond is not None) and (feeCostSecond != 0):
fee = {
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': 0,
'currency': self.safe_currency_code(feeCurrencyId),
}
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
result = {
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
if fees is not None:
result['fees'] = fees
return result
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
symbol = None
if market is not None:
symbol = market['symbol']
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
        }
        defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
        type = self.safe_string(params, 'type', defaultType)
        query = self.omit(params, 'type')
method = type + 'GetFills'
response = getattr(self, method)(self.extend(request, query))
return self.parse_my_trades(response, market, since, limit, params)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
        }
        return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
    def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
        request = {
            'instrument_id': market['id'],
        }
        type = market['type']
        if (type == 'futures') or (type == 'swap'):
            method = type + 'GetInstrumentIdPosition'
        elif type == 'option':
            underlying = self.safe_string(params, 'underlying')
            if underlying is None:
                raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
            method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = getattr(self, method)(self.extend(request, params))
return response
def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = getattr(self, method)(params)
return response
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
        request = {}
if limit is not None:
request['limit'] = limit
currency = None
if (type == 'spot') or (type == 'futures'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code)
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
method = type + 'Get' + suffix + argument + 'Ledger'
response = getattr(self, method)(self.extend(request, query))
responseLength = len(response)
if responseLength < 1:
return []
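        # margin ledger responses come back as [entries, pagination], so unwrap
        # the first element when the payload is a nested list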
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
            'transfer': 'transfer',
            'trade': 'trade',
'rebate': 'rebate',
'match': 'trade',
'fee': 'fee',
'settlement': 'trade',
'liquidation': 'trade',
'funding': 'fee',
'margin': 'margin',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_float(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_params(self.urls['api']['rest'], {'hostname': self.hostname}) + request
type = self.get_path_authentication_type(path)
if type == 'public':
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
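            # v3 signing: signature = base64(HMAC-SHA256(timestamp + method + requestPath + body, secret))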
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def get_path_authentication_type(self, path):
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return
feedback = self.id + ' ' + body
if code == 503:
raise ExchangeNotAvailable(feedback)
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback)
79070c3df17af284200d5bc7b0d6c56f78213d26 | 7,245 | py | Python
src/train_amp.py | suiyizhao/Pytorch-speedup | a9d4b0accc703035559ac6f42daddf8b1f0eb40a | ["MIT"] | 3 stars (2021-11-15 to 2021-12-06)
import sys
import time
import torch
import random
import argparse
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
# new #
import torch.cuda.amp as amp
def printParaNum(model):
'''
function: print the number of total parameters and trainable parameters
'''
total_params = sum(p.numel() for p in model.parameters())
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total parameters: %d' % total_params)
print('Trainable parameters: %d' % total_trainable_params)
def set_random_seed(seed, deterministic=False):
'''
function: Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
'''
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.model = nn.Sequential(
nn.ReflectionPad2d(1), nn.Conv2d(1, 3, 3, 2), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 3, 3, 1), nn.BatchNorm2d(3), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(3, 8, 3, 2), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 8, 3, 1), nn.BatchNorm2d(8), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(8, 16, 3, 2), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 16, 3, 1), nn.BatchNorm2d(16), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(16, 32, 3, 2), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.ReflectionPad2d(1), nn.Conv2d(32, 32, 3, 1), nn.BatchNorm2d(32), nn.LeakyReLU(0.2, inplace=True),
nn.Flatten(), nn.Linear(128, 10)
)
self.initialize_weights()
def forward(self, img):
out = self.model(img)
return out
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight.data, 0, 0.01)
m.bias.data.zero_()
time_begin = time.time()
print('---------------------------------------- step 1/5 : parameters preparing... ----------------------------------------')
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--lr", type=float, default=0.0002, help="learning rate")
parser.add_argument("--batch_size", type=int, default=2048, help="size of the batches")
parser.add_argument("--workers", type=int, default=4, help="number of cpu threads to use during batch generation")
parser.add_argument("--dataset", type=str, default='../dataset/mnist', help="dataset root")
parser.add_argument("--result_dir", type=str, default='../result', help="dir for saving the results")
opt = parser.parse_args()
print(opt)
set_random_seed(1234, deterministic=True)
time_1 = time.time()
print('---------------------------------------- step 2/5 : data loading... ------------------------------------------------')
dataset = datasets.MNIST(opt.dataset, train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]))
dataloader = DataLoader(dataset=dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
time_2 = time.time()
print('---------------------------------------- step 3/5 : model defining... ----------------------------------------------')
model = Model().cuda()
printParaNum(model)
time_3 = time.time()
print('---------------------------------------- step 4/5 : requisites defining... -----------------------------------------')
# Loss function
loss_func = nn.CrossEntropyLoss()
# Optimizers
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
# NEW #
scaler = amp.GradScaler()
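# GradScaler multiplies the fp16 loss by a dynamic scale factor before backward()
# so small gradients don't underflow to zero; scaler.step() unscales the gradients
# before the optimizer update and skips the step if inf/nan gradients are found.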
time_4 = time.time()
print('---------------------------------------- step 5/5 : training... ----------------------------------------------------')
f = open(opt.result_dir + '/log_' + sys.argv[0][0:-3] + '.txt', 'w')
f.write('Type: single machine, single card, mixing precision' + '\n')
f.write('Parallel manner: none' + '\n')
f.write('Mixing manner: amp' + '\n')
f.write('Setting: epochs: {}, lr: {}, batch_size: {}, workers: {}'.format(opt.epochs, opt.lr, opt.batch_size, opt.workers) + '\n')
f.write('----------------------------' + '\n')
f.write('Training: ' + '\n')
f.write('----------------------------' + '\n')
time_4_dataloading = 0
time_4_computing = 0
for epoch in range(opt.epochs):
time_4_begin = time.time()
for i, (imgs, labels) in enumerate(dataloader):
imgs = imgs.cuda()
labels = labels.cuda()
time_temp = time.time()
time_4_dataloading += time_temp - time_4_begin
optimizer.zero_grad()
# new #
with amp.autocast():
pred = model(imgs)
loss = loss_func(pred, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
_, pred = torch.max(pred, 1)
acc = (pred == labels).sum().item() / len(labels)
print('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc))
f.write('Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>4}/{:0>4}] Loss: {:.4f} Acc: {:.4f}'.format(
epoch + 1, opt.epochs, i + 1, len(dataloader), loss, acc) + '\n')
time_4_computing += time.time() - time_temp
time_4_begin = time.time()
time_5 = time.time()
f.write('\n')
f.write('TIME COST' + '\n')
f.write('Parameters preparing: {:.6f}(s)'.format(time_1 - time_begin) + '\n')
f.write('Data loading: {:.6f}(s)'.format(time_2 - time_1) + '\n')
f.write('Model defining: {:.6f}(s)'.format(time_3 - time_2) + '\n')
f.write('Requisites defining: {:.6f}(s)'.format(time_4 - time_3) + '\n')
f.write('Training: {:.6f}(s)'.format(time_5 - time_4) + '\n')
f.write(' Training (dataloading): {:.6f}(s)'.format(time_4_dataloading) + '\n')
f.write(' Training (computing): {:.6f}(s)'.format(time_4_computing) + '\n')
f.close()
torch.save(model.state_dict(), opt.result_dir + '/model_' + sys.argv[0][0:-3] + '.pkl')
79070c51c152d01c88b859e9c7282d79bc2ef5a2 | 1,593 | py | Python
tests/test_05_weight.py | VolumeFi/somm-wbtc-eth-test-cellar | 862b9a5c747ac2622c216073ce3d3f753d45db78 | ["Apache-2.0"]
#!/usr/bin/python3
import pytest
def test_weight(WBTC, WETH, accounts, SwapRouter, NonfungiblePositionManager, CellarPoolShareContract):
ACCURACY = 10 ** 6
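    # liquidity of every minted position should be proportional to its tick
    # weight: liq_0 / weight_0 == liq_i / weight_i, checked approximately below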
SwapRouter.exactOutputSingle([WETH, WBTC, 3000, accounts[0], 2 ** 256 - 1, 10 ** 7, 2 * 10 ** 18, 0], {"from": accounts[0], "value": 2 * 10 ** 18})
WBTC.approve(CellarPoolShareContract, 10 ** 7, {"from": accounts[0]})
ETH_amount = 10 ** 18
WBTC_amount = 5 * 10 ** 6
cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
token_id_0 = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, 0)
liq_0 = NonfungiblePositionManager.positions(token_id_0)[7]
weight_0 = CellarPoolShareContract.cellarTickInfo(0)[3]
NFT_count = NonfungiblePositionManager.balanceOf(CellarPoolShareContract)
for i in range(NFT_count - 1):
token_id = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, i + 1)
liq = NonfungiblePositionManager.positions(token_id)[7]
weight = CellarPoolShareContract.cellarTickInfo(i + 1)[3]
assert approximateCompare(liq_0 * weight, liq * weight_0, ACCURACY)
def approximateCompare(a, b, accuracy):
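    # cross-multiplied relative comparison, true when |a - b| / max(a, b) < 1 / accuracy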
if a > b:
return (a - b) * accuracy < a
else:
return (b - a) * accuracy < b
79070cbabc52296c664029e2c39b4d5a3ed1e19a | 20,649 | py | Python
lib/python3.8/site-packages/ansible_collections/community/aws/plugins/modules/sns_topic.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | ["Apache-1.1"]
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
version_added: 1.0.0
description:
- The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
- As of 2.6, this module can be use to subscribe and unsubscribe to topics outside of your AWS account.
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
- "Will Thames (@willthames)"
options:
name:
description:
- The name or ARN of the SNS topic to manage.
required: true
type: str
state:
description:
- Whether to create or destroy an SNS topic.
default: present
choices: ["absent", "present"]
type: str
display_name:
description:
- Display name of the topic.
type: str
policy:
description:
- Policy to apply to the SNS topic.
type: dict
delivery_policy:
description:
- Delivery policy to apply to the SNS topic.
type: dict
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
suboptions:
endpoint:
description: Endpoint of subscription.
required: true
protocol:
description: Protocol of subscription.
required: true
type: list
elements: dict
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
default: true
type: bool
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements: [ "boto" ]
'''
EXAMPLES = r"""
- name: Create alarm SNS topic
community.aws.sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = r'''
sns_arn:
description: The ARN of the topic you are modifying
type: str
returned: always
sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
community.aws.sns_topic:
description: Dict of sns topic details
type: complex
returned: always
contains:
attributes_set:
description: list of attributes set during this run
returned: always
type: list
sample: []
check_mode:
description: whether check mode was on
returned: always
type: bool
sample: false
delivery_policy:
description: Delivery policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
"numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
display_name:
description: Display name for SNS topic
returned: when topic is owned by this AWS account
type: str
sample: My topic name
name:
description: Topic name
returned: always
type: str
sample: ansible-test-dummy-topic
owner:
description: AWS account that owns the topic
returned: when topic is owned by this AWS account
type: str
sample: '111111111111'
policy:
description: Policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"},
"Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
state:
description: whether the topic is present or absent
returned: always
type: str
sample: present
subscriptions:
description: List of subscribers to the topic in this AWS account
returned: always
type: list
sample: []
subscriptions_added:
description: List of subscribers added in this run
returned: always
type: list
sample: []
subscriptions_confirmed:
description: Count of confirmed subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_deleted:
description: Count of deleted subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_existing:
description: List of existing subscriptions
returned: always
type: list
sample: []
subscriptions_new:
description: List of new subscriptions
returned: always
type: list
sample: []
subscriptions_pending:
description: Count of pending subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_purge:
description: Whether or not purge_subscriptions was set
returned: always
type: bool
sample: true
topic_arn:
description: ARN of the SNS topic (equivalent to sns_arn)
returned: when topic is owned by this AWS account
type: str
sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic
topic_created:
description: Whether the topic was created
returned: always
type: bool
sample: false
topic_deleted:
description: Whether the topic was deleted
returned: always
type: bool
sample: false
'''
import json
import re
import copy
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict
class SnsTopicManager(object):
""" Handles SNS Topic creation and destruction """
def __init__(self,
module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode):
self.connection = module.client('sns')
self.module = module
self.name = name
self.state = state
self.display_name = display_name
self.policy = policy
self.delivery_policy = delivery_policy
self.subscriptions = subscriptions
self.subscriptions_existing = []
self.subscriptions_deleted = []
self.subscriptions_added = []
self.purge_subscriptions = purge_subscriptions
self.check_mode = check_mode
self.topic_created = False
self.topic_deleted = False
self.topic_arn = None
self.attributes_set = []
@AWSRetry.jittered_backoff()
def _list_topics_with_backoff(self):
paginator = self.connection.get_paginator('list_topics')
return paginator.paginate().build_full_result()['Topics']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_topic_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions_by_topic')
return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions')
return paginator.paginate().build_full_result()['Subscriptions']
def _list_topics(self):
try:
topics = self._list_topics_with_backoff()
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic list")
return [t['TopicArn'] for t in topics]
def _topic_arn_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._list_topics()
lookup_topic = ':%s' % self.name
for topic in all_topics:
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
if not self.check_mode:
try:
response = self.connection.create_topic(Name=self.name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
self.topic_arn = response['TopicArn']
return True
def _compare_delivery_policies(self, policy_a, policy_b):
_policy_a = copy.deepcopy(policy_a)
_policy_b = copy.deepcopy(policy_b)
# AWS automatically injects disableSubscriptionOverrides if you set an
# http policy
if 'http' in policy_a:
if 'disableSubscriptionOverrides' not in policy_a['http']:
_policy_a['http']['disableSubscriptionOverrides'] = False
if 'http' in policy_b:
if 'disableSubscriptionOverrides' not in policy_b['http']:
_policy_b['http']['disableSubscriptionOverrides'] = False
comparison = (_policy_a != _policy_b)
return comparison
def _set_topic_attrs(self):
changed = False
try:
topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
if self.display_name and self.display_name != topic_attributes['DisplayName']:
changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
AttributeValue=self.display_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set display name")
if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
changed = True
self.attributes_set.append('policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
AttributeValue=json.dumps(self.policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic policy")
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
AttributeValue=json.dumps(self.delivery_policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
return changed
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
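            # SMS endpoints are reduced to digits only, so e.g. '+1 (555) 0100'
            # and '15550100' canonicalize to the same subscription endpoint.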
return re.sub('[^0-9]*', '', endpoint)
return endpoint
def _set_topic_subs(self):
changed = False
subscriptions_existing_list = set()
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
for sub in self._list_topic_subscriptions():
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.add(sub_key)
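            # 'PendingConfirmation' ARNs are placeholders that cannot be
            # unsubscribed through the API (see the purge_subscriptions docs),
            # so they are skipped rather than purged.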
if (self.purge_subscriptions and sub_key not in desired_subscriptions and
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
changed = True
self.subscriptions_added.append((protocol, endpoint))
if not self.check_mode:
try:
self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
return changed
def _list_topic_subscriptions(self):
try:
return self._list_topic_subscriptions_with_backoff()
except is_boto3_error_code('AuthorizationError'):
try:
# potentially AuthorizationError when listing subscriptions for third party topic
return [sub for sub in self._list_subscriptions_with_backoff()
if sub['TopicArn'] == self.topic_arn]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
subscriptions = self._list_topic_subscriptions()
if not subscriptions:
return False
for sub in subscriptions:
if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.subscriptions_deleted.append(sub['SubscriptionArn'])
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
return True
def _delete_topic(self):
self.topic_deleted = True
if not self.check_mode:
try:
self.connection.delete_topic(TopicArn=self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
return True
def _name_is_arn(self):
return self.name.startswith('arn:')
def ensure_ok(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if not self.topic_arn:
changed = self._create_topic()
if self.topic_arn in self._list_topics():
changed |= self._set_topic_attrs()
elif self.display_name or self.policy or self.delivery_policy:
self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
changed |= self._set_topic_subs()
return changed
def ensure_gone(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if self.topic_arn:
if self.topic_arn not in self._list_topics():
self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
changed = self._delete_subscriptions()
changed |= self._delete_topic()
return changed
def get_info(self):
info = {
'name': self.name,
'state': self.state,
'subscriptions_new': self.subscriptions,
'subscriptions_existing': self.subscriptions_existing,
'subscriptions_deleted': self.subscriptions_deleted,
'subscriptions_added': self.subscriptions_added,
'subscriptions_purge': self.purge_subscriptions,
'check_mode': self.check_mode,
'topic_created': self.topic_created,
'topic_deleted': self.topic_deleted,
'attributes_set': self.attributes_set,
}
if self.state != 'absent':
if self.topic_arn in self._list_topics():
info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']))
info['delivery_policy'] = info.pop('effective_delivery_policy')
info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()]
return info
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
display_name=dict(),
policy=dict(type='dict'),
delivery_policy=dict(type='dict'),
subscriptions=dict(default=[], type='list', elements='dict'),
purge_subscriptions=dict(type='bool', default=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True)
name = module.params.get('name')
state = module.params.get('state')
display_name = module.params.get('display_name')
policy = module.params.get('policy')
delivery_policy = module.params.get('delivery_policy')
subscriptions = module.params.get('subscriptions')
purge_subscriptions = module.params.get('purge_subscriptions')
check_mode = module.check_mode
sns_topic = SnsTopicManager(module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode)
if state == 'present':
changed = sns_topic.ensure_ok()
elif state == 'absent':
changed = sns_topic.ensure_gone()
sns_facts = dict(changed=changed,
sns_arn=sns_topic.topic_arn,
sns_topic=sns_topic.get_info())
module.exit_json(**sns_facts)
if __name__ == '__main__':
main()
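# A minimal, self-contained sketch of the normalization idea behind
# _compare_delivery_policies above: AWS injects 'disableSubscriptionOverrides'
# into any stored 'http' delivery policy, so both sides are normalized before
# comparing, otherwise an unchanged policy would always look changed. The
# helper and sample policies here are illustrative, not part of the module.
def _delivery_policies_differ_sketch(policy_a, policy_b):
    _a, _b = copy.deepcopy(policy_a), copy.deepcopy(policy_b)
    for p in (_a, _b):
        if 'http' in p:
            # Mirror the AWS-injected default before comparing.
            p['http'].setdefault('disableSubscriptionOverrides', False)
    return _a != _b

# The stored policy carries the injected key, the local one does not, yet the
# two compare as equal after normalization:
# _delivery_policies_differ_sketch(
#     {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3}}},
#     {'http': {'defaultHealthyRetryPolicy': {'numRetries': 3},
#               'disableSubscriptionOverrides': False}})   # -> False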
| 39.256654
| 162
| 0.638239
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
version_added: 1.0.0
description:
- The M(community.aws.sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
  - As of 2.6, this module can be used to subscribe and unsubscribe to topics outside of your AWS account.
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
- "Will Thames (@willthames)"
options:
name:
description:
- The name or ARN of the SNS topic to manage.
required: true
type: str
state:
description:
- Whether to create or destroy an SNS topic.
default: present
choices: ["absent", "present"]
type: str
display_name:
description:
- Display name of the topic.
type: str
policy:
description:
- Policy to apply to the SNS topic.
type: dict
delivery_policy:
description:
- Delivery policy to apply to the SNS topic.
type: dict
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
suboptions:
endpoint:
description: Endpoint of subscription.
required: true
protocol:
description: Protocol of subscription.
required: true
type: list
elements: dict
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
default: true
type: bool
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
requirements: [ "boto" ]
'''
EXAMPLES = r"""
- name: Create alarm SNS topic
community.aws.sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "my_email_address@example.com"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = r'''
sns_arn:
description: The ARN of the topic you are modifying
type: str
returned: always
sample: "arn:aws:sns:us-east-2:111111111111:my_topic_name"
community.aws.sns_topic:
description: Dict of sns topic details
type: complex
returned: always
contains:
attributes_set:
description: list of attributes set during this run
returned: always
type: list
sample: []
check_mode:
description: whether check mode was on
returned: always
type: bool
sample: false
delivery_policy:
description: Delivery policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"http":{"defaultHealthyRetryPolicy":{"minDelayTarget":20,"maxDelayTarget":20,"numRetries":3,"numMaxDelayRetries":0,
"numNoDelayRetries":0,"numMinDelayRetries":0,"backoffFunction":"linear"},"disableSubscriptionOverrides":false}}
display_name:
description: Display name for SNS topic
returned: when topic is owned by this AWS account
type: str
sample: My topic name
name:
description: Topic name
returned: always
type: str
sample: ansible-test-dummy-topic
owner:
description: AWS account that owns the topic
returned: when topic is owned by this AWS account
type: str
sample: '111111111111'
policy:
description: Policy for the SNS topic
returned: when topic is owned by this AWS account
type: str
sample: >
{"Version":"2012-10-17","Id":"SomePolicyId","Statement":[{"Sid":"ANewSid","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111111111:root"},
"Action":"sns:Subscribe","Resource":"arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic","Condition":{"StringEquals":{"sns:Protocol":"email"}}}]}
state:
description: whether the topic is present or absent
returned: always
type: str
sample: present
subscriptions:
description: List of subscribers to the topic in this AWS account
returned: always
type: list
sample: []
subscriptions_added:
description: List of subscribers added in this run
returned: always
type: list
sample: []
subscriptions_confirmed:
description: Count of confirmed subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_deleted:
description: Count of deleted subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_existing:
description: List of existing subscriptions
returned: always
type: list
sample: []
subscriptions_new:
description: List of new subscriptions
returned: always
type: list
sample: []
subscriptions_pending:
description: Count of pending subscriptions
returned: when topic is owned by this AWS account
type: str
sample: '0'
subscriptions_purge:
description: Whether or not purge_subscriptions was set
returned: always
type: bool
sample: true
topic_arn:
description: ARN of the SNS topic (equivalent to sns_arn)
returned: when topic is owned by this AWS account
type: str
sample: arn:aws:sns:us-east-2:111111111111:ansible-test-dummy-topic
topic_created:
description: Whether the topic was created
returned: always
type: bool
sample: false
topic_deleted:
description: Whether the topic was deleted
returned: always
type: bool
sample: false
'''
import json
import re
import copy
try:
import botocore
except ImportError:
pass
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies, AWSRetry, camel_dict_to_snake_dict
class SnsTopicManager(object):
def __init__(self,
module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode):
self.connection = module.client('sns')
self.module = module
self.name = name
self.state = state
self.display_name = display_name
self.policy = policy
self.delivery_policy = delivery_policy
self.subscriptions = subscriptions
self.subscriptions_existing = []
self.subscriptions_deleted = []
self.subscriptions_added = []
self.purge_subscriptions = purge_subscriptions
self.check_mode = check_mode
self.topic_created = False
self.topic_deleted = False
self.topic_arn = None
self.attributes_set = []
@AWSRetry.jittered_backoff()
def _list_topics_with_backoff(self):
paginator = self.connection.get_paginator('list_topics')
return paginator.paginate().build_full_result()['Topics']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_topic_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions_by_topic')
return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions']
@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound'])
def _list_subscriptions_with_backoff(self):
paginator = self.connection.get_paginator('list_subscriptions')
return paginator.paginate().build_full_result()['Subscriptions']
def _list_topics(self):
try:
topics = self._list_topics_with_backoff()
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic list")
return [t['TopicArn'] for t in topics]
def _topic_arn_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._list_topics()
lookup_topic = ':%s' % self.name
for topic in all_topics:
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
if not self.check_mode:
try:
response = self.connection.create_topic(Name=self.name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name)
self.topic_arn = response['TopicArn']
return True
def _compare_delivery_policies(self, policy_a, policy_b):
_policy_a = copy.deepcopy(policy_a)
_policy_b = copy.deepcopy(policy_b)
if 'http' in policy_a:
if 'disableSubscriptionOverrides' not in policy_a['http']:
_policy_a['http']['disableSubscriptionOverrides'] = False
if 'http' in policy_b:
if 'disableSubscriptionOverrides' not in policy_b['http']:
_policy_b['http']['disableSubscriptionOverrides'] = False
comparison = (_policy_a != _policy_b)
return comparison
def _set_topic_attrs(self):
changed = False
try:
topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn)
if self.display_name and self.display_name != topic_attributes['DisplayName']:
changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DisplayName',
AttributeValue=self.display_name)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set display name")
if self.policy and compare_policies(self.policy, json.loads(topic_attributes['Policy'])):
changed = True
self.attributes_set.append('policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='Policy',
AttributeValue=json.dumps(self.policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic policy")
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))):
changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
try:
self.connection.set_topic_attributes(TopicArn=self.topic_arn, AttributeName='DeliveryPolicy',
AttributeValue=json.dumps(self.delivery_policy))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy")
return changed
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
def _set_topic_subs(self):
changed = False
subscriptions_existing_list = set()
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
for sub in self._list_topic_subscriptions():
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.add(sub_key)
if (self.purge_subscriptions and sub_key not in desired_subscriptions and
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
for protocol, endpoint in set(desired_subscriptions).difference(subscriptions_existing_list):
changed = True
self.subscriptions_added.append((protocol, endpoint))
if not self.check_mode:
try:
self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn)
return changed
def _list_topic_subscriptions(self):
try:
return self._list_topic_subscriptions_with_backoff()
except is_boto3_error_code('AuthorizationError'):
try:
return [sub for sub in self._list_subscriptions_with_backoff()
if sub['TopicArn'] == self.topic_arn]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
def _delete_subscriptions(self):
subscriptions = self._list_topic_subscriptions()
if not subscriptions:
return False
for sub in subscriptions:
if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.subscriptions_deleted.append(sub['SubscriptionArn'])
if not self.check_mode:
try:
self.connection.unsubscribe(SubscriptionArn=sub['SubscriptionArn'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't unsubscribe from topic")
return True
def _delete_topic(self):
self.topic_deleted = True
if not self.check_mode:
try:
self.connection.delete_topic(TopicArn=self.topic_arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn)
return True
def _name_is_arn(self):
return self.name.startswith('arn:')
def ensure_ok(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if not self.topic_arn:
changed = self._create_topic()
if self.topic_arn in self._list_topics():
changed |= self._set_topic_attrs()
elif self.display_name or self.policy or self.delivery_policy:
self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account")
changed |= self._set_topic_subs()
return changed
def ensure_gone(self):
changed = False
if self._name_is_arn():
self.topic_arn = self.name
else:
self.topic_arn = self._topic_arn_lookup()
if self.topic_arn:
if self.topic_arn not in self._list_topics():
self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe")
changed = self._delete_subscriptions()
changed |= self._delete_topic()
return changed
def get_info(self):
info = {
'name': self.name,
'state': self.state,
'subscriptions_new': self.subscriptions,
'subscriptions_existing': self.subscriptions_existing,
'subscriptions_deleted': self.subscriptions_deleted,
'subscriptions_added': self.subscriptions_added,
'subscriptions_purge': self.purge_subscriptions,
'check_mode': self.check_mode,
'topic_created': self.topic_created,
'topic_deleted': self.topic_deleted,
'attributes_set': self.attributes_set,
}
if self.state != 'absent':
if self.topic_arn in self._list_topics():
info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes']))
info['delivery_policy'] = info.pop('effective_delivery_policy')
info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()]
return info
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
display_name=dict(),
policy=dict(type='dict'),
delivery_policy=dict(type='dict'),
subscriptions=dict(default=[], type='list', elements='dict'),
purge_subscriptions=dict(type='bool', default=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True)
name = module.params.get('name')
state = module.params.get('state')
display_name = module.params.get('display_name')
policy = module.params.get('policy')
delivery_policy = module.params.get('delivery_policy')
subscriptions = module.params.get('subscriptions')
purge_subscriptions = module.params.get('purge_subscriptions')
check_mode = module.check_mode
sns_topic = SnsTopicManager(module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode)
if state == 'present':
changed = sns_topic.ensure_ok()
elif state == 'absent':
changed = sns_topic.ensure_gone()
sns_facts = dict(changed=changed,
sns_arn=sns_topic.topic_arn,
sns_topic=sns_topic.get_info())
module.exit_json(**sns_facts)
if __name__ == '__main__':
main()
| true
| true
|
79070cf8be8d1e450b0e741b7984ba9e1ae73ce7
| 1,799
|
py
|
Python
|
modules/subscribers/flottsbro/flottsbro.py
|
KTH/alvares
|
75f1006b79c8bc319385230ba1e0b7fa0d4fea10
|
[
"MIT"
] | null | null | null |
modules/subscribers/flottsbro/flottsbro.py
|
KTH/alvares
|
75f1006b79c8bc319385230ba1e0b7fa0d4fea10
|
[
"MIT"
] | 3
|
2020-03-05T12:21:23.000Z
|
2021-09-22T14:36:24.000Z
|
modules/subscribers/flottsbro/flottsbro.py
|
KTH/alvares
|
75f1006b79c8bc319385230ba1e0b7fa0d4fea10
|
[
"MIT"
] | null | null | null |
__author__ = 'tinglev'
import logging
import requests
from requests import HTTPError, ConnectTimeout, RequestException
from modules import environment
from modules.subscribers.slack import slack_util
from modules.event_system.event_system import subscribe_to_event, unsubscribe_from_event
from modules import deployment_util
LOG = logging.getLogger(__name__)
DEFAULT_FLOTTSBRO_API_BASE_URL = 'https://api-r.referens.sys.kth.se/api/pipeline'
def subscribe():
subscribe_to_event('deployment', handle_deployment)
def unsubscribe():
unsubscribe_from_event('deployment', handle_deployment)
def handle_deployment(deployment):
global LOG
add(deployment)
return deployment
def get_base_url():
return environment.get_env_with_default_value(environment.FLOTTSBRO_API_BASE_URL, DEFAULT_FLOTTSBRO_API_BASE_URL)
def get_add_endpoint(cluster):
return '{}/v1/latest/{}'.format(get_base_url(), cluster)
def add(deployment):
call_endpoint(get_add_endpoint(deployment["cluster"]), deployment)
def get_headers():
api_key = environment.get_env(environment.FLOTTSBRO_API_KEY)
if not api_key:
        LOG.error('Environment variable FLOTTSBRO_API_KEY not set')
return None
return {
'api_key': api_key
}
def call_endpoint(endpoint, deployment):
global LOG
try:
headers = get_headers()
if headers:
response = requests.post(endpoint, data=deployment, headers=headers)
LOG.debug('Calling "%s", response was "%s"', endpoint, response.text)
else:
LOG.info('Skipped calling flottsbro-api, header constraints not satisfied.')
except (HTTPError, ConnectTimeout, RequestException) as request_ex:
LOG.error('Could not add deployment to Flottsbro-API: "%s"', request_ex)
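# A self-contained sketch of the guard-then-post pattern used by get_headers()
# and call_endpoint() above: with no API key configured, the request is skipped
# entirely. The endpoint URL and payload below are placeholders, not real values.
import os

def _post_if_configured_sketch(endpoint, payload):
    api_key = os.environ.get('FLOTTSBRO_API_KEY')
    if not api_key:
        LOG.info('Skipped POST, FLOTTSBRO_API_KEY not set.')
        return None
    # Same header shape as get_headers() above.
    return requests.post(endpoint, data=payload, headers={'api_key': api_key})

# _post_if_configured_sketch('https://example.com/api/pipeline/v1/latest/active',
#                            {'cluster': 'active'})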
| 31.561404
| 117
| 0.740411
|
__author__ = 'tinglev'
import logging
import requests
from requests import HTTPError, ConnectTimeout, RequestException
from modules import environment
from modules.subscribers.slack import slack_util
from modules.event_system.event_system import subscribe_to_event, unsubscribe_from_event
from modules import deployment_util
LOG = logging.getLogger(__name__)
DEFAULT_FLOTTSBRO_API_BASE_URL = 'https://api-r.referens.sys.kth.se/api/pipeline'
def subscribe():
subscribe_to_event('deployment', handle_deployment)
def unsubscribe():
unsubscribe_from_event('deployment', handle_deployment)
def handle_deployment(deployment):
global LOG
add(deployment)
return deployment
def get_base_url():
return environment.get_env_with_default_value(environment.FLOTTSBRO_API_BASE_URL, DEFAULT_FLOTTSBRO_API_BASE_URL)
def get_add_endpoint(cluster):
return '{}/v1/latest/{}'.format(get_base_url(), cluster)
def add(deployment):
call_endpoint(get_add_endpoint(deployment["cluster"]), deployment)
def get_headers():
api_key = environment.get_env(environment.FLOTTSBRO_API_KEY)
if not api_key:
        LOG.error('Environment variable FLOTTSBRO_API_KEY not set')
return None
return {
'api_key': api_key
}
def call_endpoint(endpoint, deployment):
global LOG
try:
headers = get_headers()
if headers:
response = requests.post(endpoint, data=deployment, headers=headers)
LOG.debug('Calling "%s", response was "%s"', endpoint, response.text)
else:
LOG.info('Skipped calling flottsbro-api, header constraints not satisfied.')
except (HTTPError, ConnectTimeout, RequestException) as request_ex:
LOG.error('Could not add deployment to Flottsbro-API: "%s"', request_ex)
| true
| true
|
79070d17a4163f46519228f051f77c1390ac6edb
| 1,285
|
py
|
Python
|
tf2onnx/tflite/LessOptions.py
|
LoicDagnas/tensorflow-onnx
|
6691850e79047d05d85017573170fd8240393b57
|
[
"Apache-2.0"
] | 1,473
|
2018-03-16T02:47:33.000Z
|
2022-03-31T03:43:52.000Z
|
tf2onnx/tflite/LessOptions.py
|
LoicDagnas/tensorflow-onnx
|
6691850e79047d05d85017573170fd8240393b57
|
[
"Apache-2.0"
] | 1,208
|
2018-03-14T09:58:49.000Z
|
2022-03-31T17:56:20.000Z
|
tf2onnx/tflite/LessOptions.py
|
LoicDagnas/tensorflow-onnx
|
6691850e79047d05d85017573170fd8240393b57
|
[
"Apache-2.0"
] | 350
|
2018-04-03T03:48:40.000Z
|
2022-03-30T11:23:55.000Z
|
# SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LessOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LessOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def Start(builder): builder.StartObject(0)
def LessOptionsStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def End(builder): return builder.EndObject()
def LessOptionsEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder)
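# A short round-trip sketch using the generated accessors above. LessOptions
# carries no fields, so building it is just Start/End, and reading it back only
# needs the root offset; this follows the standard flatbuffers Python API.
def _lessoptions_roundtrip_sketch():
    builder = flatbuffers.Builder(0)
    Start(builder)
    opts = End(builder)
    builder.Finish(opts)
    buf = builder.Output()
    # Output() starts at the root table, hence offset 0.
    return LessOptions.GetRootAs(buf, 0)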
| 32.125
| 114
| 0.705837
|
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LessOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
return cls.GetRootAs(buf, offset)
@classmethod
def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def Start(builder): builder.StartObject(0)
def LessOptionsStart(builder):
return Start(builder)
def End(builder): return builder.EndObject()
def LessOptionsEnd(builder):
return End(builder)
| true
| true
|
79070d96eac5ea66dfc9f2206e334c33bd46075f
| 1,285
|
py
|
Python
|
selfdrive/loggerd/deleter.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 116
|
2018-03-07T09:00:10.000Z
|
2020-04-06T18:37:45.000Z
|
selfdrive/loggerd/deleter.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 66
|
2020-04-09T20:27:57.000Z
|
2022-01-27T14:39:24.000Z
|
selfdrive/loggerd/deleter.py
|
JoeOIVOV/ArnePilot
|
82c71c6f5af1ba504b748940f22cc0ac98692662
|
[
"MIT"
] | 154
|
2020-04-08T21:41:22.000Z
|
2022-03-17T21:05:33.000Z
|
#!/usr/bin/env python3
import os
import shutil
import threading
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT, get_available_bytes, get_available_percent
from selfdrive.loggerd.uploader import listdir_by_creation
from selfdrive.dragonpilot.dashcam import DASHCAM_FREESPACE_LIMIT
MIN_BYTES = 5 * 1024 * 1024 * 1024
MIN_PERCENT = 10 + (DASHCAM_FREESPACE_LIMIT * 100)
def deleter_thread(exit_event):
while not exit_event.is_set():
out_of_bytes = get_available_bytes(default=MIN_BYTES + 1) < MIN_BYTES
out_of_percent = get_available_percent(default=MIN_PERCENT + 1) < MIN_PERCENT
if out_of_percent or out_of_bytes:
# remove the earliest directory we can
dirs = listdir_by_creation(ROOT)
for delete_dir in dirs:
delete_path = os.path.join(ROOT, delete_dir)
if any(name.endswith(".lock") for name in os.listdir(delete_path)):
continue
try:
cloudlog.info("deleting %s" % delete_path)
shutil.rmtree(delete_path)
break
except OSError:
cloudlog.exception("issue deleting %s" % delete_path)
exit_event.wait(.1)
else:
exit_event.wait(30)
def main():
deleter_thread(threading.Event())
if __name__ == "__main__":
main()
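# deleter_thread relies on listdir_by_creation returning route segments
# oldest-first, so the loop above always removes the earliest unlocked
# directory. A standalone sketch of that ordering (illustrative only, not
# the real implementation):
def _listdir_oldest_first_sketch(root):
    paths = [os.path.join(root, n) for n in os.listdir(root)]
    # Sort by creation time so index 0 is the first deletion candidate.
    paths.sort(key=lambda p: os.stat(p).st_ctime)
    return [os.path.basename(p) for p in paths]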
| 28.555556
| 85
| 0.714397
|
import os
import shutil
import threading
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT, get_available_bytes, get_available_percent
from selfdrive.loggerd.uploader import listdir_by_creation
from selfdrive.dragonpilot.dashcam import DASHCAM_FREESPACE_LIMIT
MIN_BYTES = 5 * 1024 * 1024 * 1024
MIN_PERCENT = 10 + (DASHCAM_FREESPACE_LIMIT * 100)
def deleter_thread(exit_event):
while not exit_event.is_set():
out_of_bytes = get_available_bytes(default=MIN_BYTES + 1) < MIN_BYTES
out_of_percent = get_available_percent(default=MIN_PERCENT + 1) < MIN_PERCENT
if out_of_percent or out_of_bytes:
dirs = listdir_by_creation(ROOT)
for delete_dir in dirs:
delete_path = os.path.join(ROOT, delete_dir)
if any(name.endswith(".lock") for name in os.listdir(delete_path)):
continue
try:
cloudlog.info("deleting %s" % delete_path)
shutil.rmtree(delete_path)
break
except OSError:
cloudlog.exception("issue deleting %s" % delete_path)
exit_event.wait(.1)
else:
exit_event.wait(30)
def main():
deleter_thread(threading.Event())
if __name__ == "__main__":
main()
| true
| true
|
79070deeea73b6884a348bbda16cd804b305ec0a
| 1,614
|
py
|
Python
|
application/server/main.py
|
EgoPro1/InceptionV2
|
c24c7ded53445fe409aaa46b7eeabeb93a7b0ef7
|
[
"MIT"
] | 1
|
2021-09-04T23:15:43.000Z
|
2021-09-04T23:15:43.000Z
|
application/server/main.py
|
EgoPro1/InceptionV2
|
c24c7ded53445fe409aaa46b7eeabeb93a7b0ef7
|
[
"MIT"
] | null | null | null |
application/server/main.py
|
EgoPro1/InceptionV2
|
c24c7ded53445fe409aaa46b7eeabeb93a7b0ef7
|
[
"MIT"
] | null | null | null |
import uvicorn
from fastapi import (FastAPI, File, UploadFile)
from starlette.responses import RedirectResponse
from tensorflow.python.keras.preprocessing import image as imgx
import requests
from PIL import Image
from application.components import predict, read_imagefile
from application.schema import Symptom
from application.components.prediction import symptom_check
from googletrans import Translator, constants
from pprint import pprint
app_desc = """<h2>Try this app by uploading any image with `predict/image`</h2>
<h2>Analyze photos</h2>
<br>Template by Aniket Maurya, new version by Joaquin Egocheaga"""
app = FastAPI(title='Comparizy, Tensorflow FastAPI', description=app_desc)
translator = Translator()
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
@app.post("/predict/image")
async def predict_api(file: UploadFile = File(...)):
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
print(file.filename)
print(extension)
if not extension:
return "Image must be jpg or png format!"
image = read_imagefile(await file.read())
prediction = predict(image)
    clase = prediction[0]['class']
    clase = clase.replace("_", " ")
print(clase)
print("X")
translation = translator.translate(clase, "es")
    translation = translation.text
print(translation)
return translation
@app.post("/api/covid-symptom-check")
def check_risk(symptom: Symptom):
return symptom_check.get_risk_level(symptom)
if __name__ == "__main__":
uvicorn.run(app, debug=True)
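# A minimal client sketch for the /predict/image endpoint above; host, port,
# and file name are assumptions for illustration. The multipart field name
# 'file' mirrors the UploadFile parameter declared by predict_api.
def _predict_image_client_sketch(path='photo.jpg',
                                 url='http://localhost:8000/predict/image'):
    with open(path, 'rb') as f:
        resp = requests.post(url, files={'file': (path, f, 'image/jpeg')})
    return resp.text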
| 28.821429
| 79
| 0.724907
|
import uvicorn
from fastapi import (FastAPI, File, UploadFile)
from starlette.responses import RedirectResponse
from tensorflow.python.keras.preprocessing import image as imgx
import requests
from PIL import Image
from application.components import predict, read_imagefile
from application.schema import Symptom
from application.components.prediction import symptom_check
from googletrans import Translator, constants
from pprint import pprint
app_desc = """<h2>Try this app by uploading any image with `predict/image`</h2>
<h2>Analyze photos</h2>
<br>Template by Aniket Maurya, new version by Joaquin Egocheaga"""
app = FastAPI(title='Comparizy, Tensorflow FastAPI', description=app_desc)
translator = Translator()
@app.get("/", include_in_schema=False)
async def index():
return RedirectResponse(url="/docs")
@app.post("/predict/image")
async def predict_api(file: UploadFile = File(...)):
extension = file.filename.split(".")[-1] in ("jpg", "jpeg", "png")
print(file.filename)
print(extension)
if not extension:
return "Image must be jpg or png format!"
image = read_imagefile(await file.read())
prediction = predict(image)
    clase = prediction[0]['class']
    clase = clase.replace("_", " ")
print(clase)
print("X")
translation = translator.translate(clase, "es")
    translation = translation.text
print(translation)
return translation
@app.post("/api/covid-symptom-check")
def check_risk(symptom: Symptom):
return symptom_check.get_risk_level(symptom)
if __name__ == "__main__":
uvicorn.run(app, debug=True)
| true
| true
|
79070e2d3f5dc8ddb6b9307a549f19b4bd0e6bb5
| 26,734
|
py
|
Python
|
pynq/lib/logictools/tests/test_fsm_generator.py
|
michalkouril/PYNQ
|
c72febc2decc83816f40b91a7f60e11fe707c248
|
[
"BSD-3-Clause"
] | 1,537
|
2016-09-26T22:51:50.000Z
|
2022-03-31T13:33:54.000Z
|
pynq/lib/logictools/tests/test_fsm_generator.py
|
michalkouril/PYNQ
|
c72febc2decc83816f40b91a7f60e11fe707c248
|
[
"BSD-3-Clause"
] | 414
|
2016-10-03T21:12:10.000Z
|
2022-03-21T14:55:02.000Z
|
pynq/lib/logictools/tests/test_fsm_generator.py
|
michalkouril/PYNQ
|
c72febc2decc83816f40b91a7f60e11fe707c248
|
[
"BSD-3-Clause"
] | 826
|
2016-09-23T22:29:43.000Z
|
2022-03-29T11:02:09.000Z
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
"""Build an FSM spec with 4 states.
The FSM built has 2 inputs, 1 output, and 4 states. It acts like a
    2-bit counter, where the output goes high only if the FSM is in the
final state.
When the direction pin is low, the counter counts up; if it is high, the
counter counts down.
Parameters
----------
direction_logic_value : int
The logic value of the direction pin.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output pattern corresponding to the direction value.
list
The state bit0 pattern corresponding to the direction value.
list
The state bit1 pattern corresponding to the direction value.
"""
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
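# A quick, self-contained way to sanity-check the transition table built above,
# assuming the output bit listed in each transition row is emitted once per
# step (illustrative helper, not part of the test suite): with input '00' and
# 4 steps it reproduces the count-up pattern [0, 0, 0, 1].
def _simulate_fsm_spec_sketch(fsm_spec, input_bits, steps):
    table = {(t[0], t[1]): (t[2], t[3]) for t in fsm_spec['transitions']}
    state, outputs = 'S0', []
    for _ in range(steps):
        state, output = table[(input_bits, state)]
        outputs.append(int(output))
    return outputs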
def build_fsm_spec_random(num_states):
"""Build an FSM spec with the specified number of states.
    The FSM spec uses only a single input and a single output. As a side
product, a list of output patterns are also returned.
Parameters
----------
num_states : int
The number of states of the FSM.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
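# Shape of the returned spec for num_states >= 2 (output bits vary per run):
# a single-input, single-output ring counter with one transition per state,
# plus the '1'/'*' reset row. For example, build_fsm_spec_random(3) yields
# spec['states'] == ['S0', 'S1', 'S2'], 4 transitions (3 hops + reset), and
# an output pattern list of length 3.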
def build_fsm_spec_max_in_out():
"""Build an FSM spec using a maximum number of inputs and outputs.
The returned FSM spec has a maximum number of inputs and
outputs. At the same time, the largest available number of
states will be implemented. For example, on PYNQ-Z1, if
FSM_MAX_INPUT_BITS = 8, and FSM_MAX_STATE_INPUT_BITS = 13, we will
implement 2**(13-8)-1 = 31 states. This is the largest number of states
available for this setup, since there is always 1 dummy state that has
to be reserved.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
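# Worked instance of the state-count arithmetic from the docstring above: with
# FSM_MAX_STATE_INPUT_BITS = 13 and FSM_MAX_INPUT_BITS = 8 (the PYNQ-Z1 example
# given there), 2 ** (13 - 8) - 1 == 31 usable states remain once the single
# dummy state is reserved.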
def build_fsm_spec_free_run():
"""Build a spec that results in a free-running FSM.
This will return an FSM spec with no given inputs.
In this case, the FSM is a free running state machine.
A maximum number of states are deployed.
Returns
-------
dict
The FSM spec that can be consumed by the FSM generator.
list
The output patterns associated with this FSM spec.
"""
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
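# In the free-running spec above every transition row uses '-' in the input
# field, i.e. the hop to the next state is taken on every clock regardless of
# the (dummy) input pin, which is what makes the machine free-running.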
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
"""Test for the Finite State Machine Generator class.
In this test, the pattern generated by the FSM will be compared with the
one specified. We will test a minimum number of (FSM period + 1) samples,
and a maximum number of samples. 10MHz and 100MHz clocks are tested
for each case.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
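# The golden reference in the test above is just the 4-sample FSM period tiled
# out to the capture length and truncated, e.g. with the direction=1 pattern:
# np.tile(np.array([0, 1, 0, 0]), ceil(10 / 4))[:10]
# -> array([0, 1, 0, 0, 0, 1, 0, 0, 0, 1])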
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
"""Test for the Finite State Machine Generator class.
This test is similar to the first test, but in this test,
we will test the case when the state bits are also used as outputs.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
"""Test for the Finite State Machine Generator class.
This test is similar to the above test, but in this test,
we will test the `step()` method, and ask users to change the input
logic values in the middle of the test.
"""
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
"""Test for the Finite State Machine Generator class.
This is similar to the first test, but in this test,
we will test the case when no analyzer is specified.
"""
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
"""Test for the Finite State Machine Generator class.
    This test will check (FSM_MIN_NUM_STATES - 1) and (FSM_MAX_NUM_STATES + 1) states.
These cases should raise exceptions. For these tests, we use the minimum
number of input and output pins.
"""
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
"""Test for the Finite State Machine Generator class.
    This test will check 2 and FSM_MAX_NUM_STATES states.
These cases should be able to pass random tests.
For these tests, we use the minimum number of input and output pins.
"""
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
"""Test for the Finite State Machine Generator class.
    This test covers the case when the maximum numbers of inputs and
    outputs are used. At the same time, the largest available number of
    states will be implemented.
"""
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
"""Test for the Finite State Machine Generator class.
This will examine a special scenario where no inputs are given.
In this case, the FSM is a free running state machine. Since the FSM
specification requires at least 1 input pin to be specified, 1 pin can
be used as `don't care` input, while all the other pins are used as
outputs. A maximum number of states are deployed.
"""
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
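
As a quick orientation for the record above, here is a minimal, hedged sketch of driving the FSMGenerator API that these tests exercise. It reuses only the calls and the spec-dictionary format shown in the file; pin names come from the board specification, and the sketch is illustrative rather than a verified script.

# Minimal illustrative sketch (assumes the logictools overlay and the
# ARDUINO interface used throughout the tests above).
from pynq import Overlay
from pynq.lib.logictools import FSMGenerator, ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION

ol = Overlay('logictools.bit', download=False)
ol.download()
pins = list(PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs'].keys())

# A two-state toggler in the same spec format as build_fsm_spec_random():
# each transition is [input_bits, current_state, next_state, output_bits].
fsm_spec = {'inputs': [('rst', pins[0])],
            'outputs': [('test', pins[1])],
            'states': ['S0', 'S1'],
            'transitions': [['0', 'S0', 'S1', '0'],
                            ['0', 'S1', 'S0', '1'],
                            ['1', '*', 'S0', '']]}

fsm = FSMGenerator(ARDUINO)
fsm.trace(use_analyzer=True, num_analyzer_samples=16)
fsm.setup(fsm_spec, frequency_mhz=10)
fsm.run()    # free-running until stopped
fsm.stop()
fsm.reset()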
| 39.842027
| 79
| 0.625533
|
from random import randint
from math import ceil
import numpy as np
import pytest
from pynq import Overlay
from pynq.tests.util import user_answer_yes
from pynq.lib.logictools.waveform import bitstring_to_int
from pynq.lib.logictools.waveform import wave_to_bitstring
from pynq.lib.logictools import FSMGenerator
from pynq.lib.logictools import ARDUINO
from pynq.lib.logictools import PYNQZ1_LOGICTOOLS_SPECIFICATION
from pynq.lib.logictools import MAX_NUM_TRACE_SAMPLES
from pynq.lib.logictools import FSM_MIN_NUM_STATES
from pynq.lib.logictools import FSM_MAX_NUM_STATES
from pynq.lib.logictools import FSM_MAX_INPUT_BITS
from pynq.lib.logictools import FSM_MAX_STATE_INPUT_BITS
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
try:
ol = Overlay('logictools.bit', download=False)
flag0 = True
except IOError:
flag0 = False
flag1 = user_answer_yes("\nTest Finite State Machine (FSM) generator?")
if flag1:
mb_info = ARDUINO
flag = flag0 and flag1
pin_dict = PYNQZ1_LOGICTOOLS_SPECIFICATION['traceable_outputs']
interface_width = PYNQZ1_LOGICTOOLS_SPECIFICATION['interface_width']
def build_fsm_spec_4_state(direction_logic_value):
out, rst, direction = list(pin_dict.keys())[0:3]
fsm_spec_4_state = {'inputs': [('rst', rst), ('direction', direction)],
'outputs': [('test', out)],
'states': ['S0', 'S1', 'S2', 'S3'],
'transitions': [['00', 'S0', 'S1', '0'],
['01', 'S0', 'S3', '0'],
['00', 'S1', 'S2', '0'],
['01', 'S1', 'S0', '0'],
['00', 'S2', 'S3', '0'],
['01', 'S2', 'S1', '0'],
['00', 'S3', 'S0', '1'],
['01', 'S3', 'S2', '1'],
['1-', '*', 'S0', '']]}
if not direction_logic_value:
output_pattern = [0, 0, 0, 1]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 0, 1, 1]
else:
output_pattern = [0, 1, 0, 0]
state_bit0_pattern = [0, 1, 0, 1]
state_bit1_pattern = [0, 1, 1, 0]
return fsm_spec_4_state, \
output_pattern, state_bit0_pattern, state_bit1_pattern
def build_fsm_spec_random(num_states):
input_pin, output_pin = list(pin_dict.keys())[0:2]
if num_states == 1:
return {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': ['S0'],
'transitions': [['1', '*', 'S0', '']]}, None
else:
fsm_spec_state = {'inputs': [('rst', input_pin)],
'outputs': [('test', output_pin)],
'states': [],
'transitions': [['1', '*', 'S0', '']]}
output_pattern_list = list()
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i+1) % num_states)
fsm_spec_state['states'] += [current_state]
output_pattern = '{}'.format(randint(0, 1))
transition = ['0', current_state, next_state, output_pattern]
fsm_spec_state['transitions'] += [transition]
output_pattern_list.append(int(output_pattern))
return fsm_spec_state, output_pattern_list
def build_fsm_spec_max_in_out():
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
output_pins = list(pin_dict.keys())[FSM_MAX_INPUT_BITS:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': [['1' * len(input_pins), '*', 'S0', '']]}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
for i in range(len(input_pins)):
fsm_spec_inout['inputs'].append(('input{}'.format(i),
input_pins[i]))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['0' * len(input_pins), current_state, next_state,
output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
def build_fsm_spec_free_run():
input_pin = list(pin_dict.keys())[0]
output_pins = list(pin_dict.keys())[1:interface_width]
fsm_spec_inout = {'inputs': [],
'outputs': [],
'states': [],
'transitions': []}
test_lanes = [[] for _ in range(len(output_pins))]
num_states = FSM_MAX_NUM_STATES
fsm_spec_inout['inputs'].append(('input0', input_pin))
for i in range(len(output_pins)):
fsm_spec_inout['outputs'].append(('output{}'.format(i),
output_pins[i]))
for i in range(num_states):
current_state = 'S{}'.format(i)
next_state = 'S{}'.format((i + 1) % num_states)
fsm_spec_inout['states'].append(current_state)
output_pattern = ''
for test_lane in test_lanes:
random_1bit = '{}'.format(randint(0, 1))
output_pattern += random_1bit
test_lane += random_1bit
transition = ['-', current_state, next_state, output_pattern]
fsm_spec_inout['transitions'].append(transition)
test_patterns = []
for i in range(len(output_pins)):
temp_string = ''.join(test_lanes[i])
test_patterns.append(np.array(bitstring_to_int(
wave_to_bitstring(temp_string))))
return fsm_spec_inout, test_patterns
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_samples():
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, _, _ = build_fsm_spec_4_state(1)
fsm_period = len(fsm_spec_4_state['states'])
for num_samples in [fsm_period, MAX_NUM_TRACE_SAMPLES]:
test_tile = np.array(output_pattern)
golden_test_array = np.tile(test_tile, ceil(num_samples / 4))
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
assert fsm_generator.status == 'RESET'
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=num_samples)
fsm_generator.setup(fsm_spec_4_state,
frequency_mhz=fsm_frequency_mhz)
assert fsm_generator.status == 'READY'
assert 'bram_data_buf' not in \
fsm_generator.logictools_controller.buffers, \
'bram_data_buf is not freed after use.'
fsm_generator.run()
assert fsm_generator.status == 'RUNNING'
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
assert np.array_equal(test_array,
golden_test_array[:num_samples]), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
assert fsm_generator.status == 'READY'
fsm_generator.reset()
assert fsm_generator.status == 'RESET'
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_state_bits():
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("\nConnect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
fsm_spec_4_state, output_pattern, \
state_bit0_pattern, state_bit1_pattern = build_fsm_spec_4_state(0)
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern)
golden_state_bit0_array = np.array(state_bit0_pattern)
golden_state_bit1_array = np.array(state_bit1_pattern)
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
fsm_generator.run()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when running at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_step():
ol.download()
rst, direction = list(pin_dict.keys())[1:3]
print("")
fsm_spec_4_state, output_pattern_up, \
state_bit0_pattern_up, \
state_bit1_pattern_up = build_fsm_spec_4_state(0)
_, output_pattern_down, \
state_bit0_pattern_down, \
state_bit1_pattern_down = build_fsm_spec_4_state(1)
output_pattern_down.append(output_pattern_down.pop(0))
state_bit0_pattern_down.append(state_bit0_pattern_down.pop(0))
state_bit1_pattern_down.append(state_bit1_pattern_down.pop(0))
fsm_period = len(fsm_spec_4_state['states'])
golden_test_array = np.array(output_pattern_up +
output_pattern_down[1:])
golden_state_bit0_array = np.array(state_bit0_pattern_up +
state_bit0_pattern_down[1:])
golden_state_bit1_array = np.array(state_bit1_pattern_up +
state_bit1_pattern_down[1:])
for fsm_frequency_mhz in [10, 100]:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=fsm_period)
fsm_generator.setup(fsm_spec_4_state,
use_state_bits=True,
frequency_mhz=fsm_frequency_mhz)
print("Connect both {} and {} to GND.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_up)-1):
fsm_generator.step()
print("Connect {} to GND, and {} to VCC.".format(rst, direction))
input("Hit enter after done ...")
for _ in range(len(output_pattern_down)):
fsm_generator.step()
test_string = state_bit0_string = state_bit1_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
if wavelane['name'] == 'state_bit0':
state_bit0_string = wavelane['wave']
if wavelane['name'] == 'state_bit1':
state_bit1_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
state_bit0_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit0_string)))
state_bit1_array = np.array(bitstring_to_int(
wave_to_bitstring(state_bit1_string)))
assert np.array_equal(golden_test_array, test_array), \
'Data pattern not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit0_array, state_bit0_array), \
'State bit0 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
assert np.array_equal(golden_state_bit1_array, state_bit1_array), \
'State bit1 not correct when stepping at {}MHz.'.format(
fsm_frequency_mhz)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_no_trace():
ol.download()
fsm_spec_4_state, _, _, _ = build_fsm_spec_4_state(0)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=False)
fsm_generator.setup(fsm_spec_4_state)
fsm_generator.run()
exception_raised = False
try:
fsm_generator.show_waveform()
except ValueError:
exception_raised = True
assert exception_raised, 'Should raise exception for show_waveform().'
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states1():
ol.download()
fsm_generator = None
exception_raised = False
fsm_spec_less_than_min_state, _ = build_fsm_spec_random(
FSM_MIN_NUM_STATES - 1)
fsm_spec_more_than_max_state, _ = build_fsm_spec_random(
FSM_MAX_NUM_STATES + 1)
for fsm_spec in [fsm_spec_less_than_min_state,
fsm_spec_more_than_max_state]:
num_states = len(fsm_spec['states'])
try:
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec)
except ValueError:
exception_raised = True
assert exception_raised, \
'Should raise exception when ' \
'there are {} states in the FSM.'.format(num_states)
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_num_states2():
ol.download()
input_pin = list(pin_dict.keys())[0]
print("\nConnect {} to GND, and disconnect other pins.".format(input_pin))
input("Hit enter after done ...")
for num_states in [2, FSM_MAX_NUM_STATES]:
fsm_spec, test_pattern = build_fsm_spec_random(num_states)
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec, frequency_mhz=100)
fsm_generator.run()
test_string = ''
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
if wavelane['name'] == 'test':
test_string = wavelane['wave']
test_array = np.array(bitstring_to_int(
wave_to_bitstring(test_string)))
period = num_states
test_tile = np.array(test_pattern)
golden_test_array = np.tile(test_tile,
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_array,
golden_test_array[:MAX_NUM_TRACE_SAMPLES]), \
'Analysis not matching the generated pattern.'
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_max_in_out():
ol.download()
input_pins = list(pin_dict.keys())[:FSM_MAX_INPUT_BITS]
print("\nConnect {} to GND.".format(input_pins))
print("Disconnect all other pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_max_in_out()
period = 2 ** (FSM_MAX_STATE_INPUT_BITS - FSM_MAX_INPUT_BITS) - 1
num_output_pins = interface_width - FSM_MAX_INPUT_BITS
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=MAX_NUM_TRACE_SAMPLES)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = [[] for _ in range(num_output_pins)]
for i in range(num_output_pins):
golden_arrays[i] = np.tile(test_patterns[i],
ceil(MAX_NUM_TRACE_SAMPLES / period))
assert np.array_equal(test_arrays[i],
golden_arrays[i][:MAX_NUM_TRACE_SAMPLES]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
@pytest.mark.skipif(not flag, reason="need correct overlay to run")
def test_fsm_free_run():
ol.download()
print("\nDisconnect all the pins.")
input("Hit enter after done ...")
fsm_spec_inout, test_patterns = build_fsm_spec_free_run()
period = FSM_MAX_NUM_STATES
num_output_pins = interface_width - 1
fsm_generator = FSMGenerator(mb_info)
fsm_generator.trace(use_analyzer=True,
num_analyzer_samples=period)
fsm_generator.setup(fsm_spec_inout, frequency_mhz=100)
fsm_generator.run()
test_strings = ['' for _ in range(num_output_pins)]
test_arrays = [[] for _ in range(num_output_pins)]
for wavegroup in fsm_generator.waveform.waveform_dict['signal']:
if wavegroup and wavegroup[0] == 'analysis':
for wavelane in wavegroup[1:]:
for j in range(num_output_pins):
if wavelane['name'] == 'output{}'.format(j):
test_strings[j] = wavelane['wave']
test_arrays[j] = np.array(bitstring_to_int(
wave_to_bitstring(test_strings[j])))
break
golden_arrays = test_patterns
for i in range(num_output_pins):
assert np.array_equal(test_arrays[i], golden_arrays[i]), \
'Output{} not matching the generated pattern.'.format(i)
fsm_generator.stop()
fsm_generator.reset()
del fsm_generator
| true
| true
|
79070f25dcdb2e976bc31713d7f6ab46debfc137
| 2,487
|
py
|
Python
|
medium/python3/c0288_609_find-duplicate-file-in-system/00_leetcode_0288.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python3/c0288_609_find-duplicate-file-in-system/00_leetcode_0288.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
medium/python3/c0288_609_find-duplicate-file-in-system/00_leetcode_0288.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | 3
|
2018-02-09T02:46:48.000Z
|
2021-02-20T08:32:03.000Z
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#609. Find Duplicate File in System
#Given a list of directory info including directory path, and all the files with contents in this directory, you need to find out all the groups of duplicate files in the file system in terms of their paths.
#A group of duplicate files consists of at least two files that have exactly the same content.
#A single directory info string in the input list has the following format:
#"root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
#It means there are n files (f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively) in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0, it means the directory is just the root directory.
#The output is a list of groups of duplicate file paths. For each group, it contains all the file paths of the files that have the same content. A file path is a string that has the following format:
#"directory_path/file_name.txt"
#Example 1:
#Input:
#["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
#Output:
#[["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
#Note:
#No order is required for the final output.
#You may assume the directory name, file name and file content only have letters and digits, and the length of file content is in the range of [1,50].
#The number of files given is in the range of [1,20000].
#You may assume no files or directories share the same name in the same directory.
#You may assume each given directory info represents a unique directory. Directory path and file info are separated by a single blank space.
#Follow-up beyond contest:
#Imagine you are given a real file system, how will you search files? DFS or BFS?
#If the file content is very large (GB level), how will you modify your solution?
#If you can only read the file by 1kb each time, how will you modify your solution?
#What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
#How to make sure the duplicated files you find are not false positive?
#class Solution:
# def findDuplicate(self, paths):
# """
# :type paths: List[str]
# :rtype: List[List[str]]
# """
# Time Is Money
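
The skeleton above is left unimplemented by the template. As an illustrative sketch only (not part of the original record), the standard approach groups paths by file content in one pass:

from collections import defaultdict

def findDuplicate(paths):
    """Group file paths by identical content; keep groups of size >= 2."""
    groups = defaultdict(list)
    for info in paths:
        parts = info.split()
        root = parts[0]
        for entry in parts[1:]:                   # e.g. "1.txt(abcd)"
            name, _, rest = entry.partition('(')
            content = rest.rstrip(')')
            groups[content].append(root + '/' + name)
    return [group for group in groups.values() if len(group) > 1]

# Example 1 from the statement above returns
# [["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
# (group order is not significant).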
| 65.447368
| 242
| 0.737837
|
# :type paths: List[str]
# :rtype: List[List[str]]
# """
| true
| true
|
79070f851445a53e05d0643bcd5bbf8d376690ef
| 6,665
|
py
|
Python
|
grr/gui/api_regression_http.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | null | null | null |
grr/gui/api_regression_http.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | 1
|
2018-05-08T21:15:51.000Z
|
2018-05-08T21:15:51.000Z
|
grr/gui/api_regression_http.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Base test classes for API handlers tests."""
# pylint:mode=test
import json
import logging
import os
import threading
import portpicker
import requests
from google.protobuf import json_format
from grr import gui
from grr_api_client.connectors import http_connector
from grr.gui import api_auth_manager
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.gui import http_api
from grr.gui import wsgiapp_testlib
from grr.lib import flags
from grr.lib import utils
from grr.server import data_store
from grr.test_lib import test_lib
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")
_HTTP_ENDPOINTS = {}
_HTTP_ENDPOINTS_LOCK = threading.RLock()
class HttpApiRegressionTestMixinBase(object):
"""Load only API E2E test cases."""
api_version = None
read_from_relational_db = False
_get_connector_lock = threading.RLock()
@staticmethod
def GetConnector(api_version):
if api_version not in [1, 2]:
raise ValueError("api_version may be 1 or 2 only")
with _HTTP_ENDPOINTS_LOCK:
if api_version not in _HTTP_ENDPOINTS:
port = portpicker.PickUnusedPort()
logging.info("Picked free AdminUI port %d.", port)
# Force creation of new APIAuthorizationManager.
api_auth_manager.APIACLInit.InitApiAuthManager()
trd = wsgiapp_testlib.ServerThread(port)
trd.StartAndWaitUntilServing()
_HTTP_ENDPOINTS[api_version] = "http://localhost:%d" % port
return http_connector.HttpConnector(
api_endpoint=_HTTP_ENDPOINTS[api_version])
def setUp(self):
super(HttpApiRegressionTestMixinBase, self).setUp()
self.connector = self.GetConnector(self.__class__.api_version)
if (not getattr(self, "aff4_only_test", False) and
self.__class__.read_from_relational_db):
self.db_config_overrider = test_lib.ConfigOverrider({
"Database.useForReads": True
})
self.db_config_overrider.Start()
else:
self.db_config_overrider = None
def tearDown(self):
super(HttpApiRegressionTestMixinBase, self).tearDown()
if self.db_config_overrider:
self.db_config_overrider.Stop()
def _ParseJSON(self, json_str):
"""Parses response JSON."""
xssi_prefix = ")]}'\n"
if json_str.startswith(xssi_prefix):
json_str = json_str[len(xssi_prefix):]
return json.loads(json_str)
def _PrepareV1Request(self, method, args=None):
"""Prepares API v1 request for a given method and args."""
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
request.url = request.url.replace("/api/v2/", "/api/")
if args and request.data:
body_proto = args.__class__().AsPrimitiveProto()
json_format.Parse(request.data, body_proto)
body_args = args.__class__()
body_args.ParseFromString(body_proto.SerializeToString())
request.data = json.dumps(
api_value_renderers.StripTypeInfo(
api_value_renderers.RenderValue(body_args)),
cls=http_api.JSONEncoderWithRDFPrimitivesSupport)
prepped_request = request.prepare()
return request, prepped_request
def _PrepareV2Request(self, method, args=None):
"""Prepares API v2 request for a given method and args."""
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
prepped_request = request.prepare()
return request, prepped_request
def HandleCheck(self, method_metadata, args=None, replace=None):
"""Does regression check for given method, args and a replace function."""
if not replace:
raise ValueError("replace can't be None")
if self.__class__.api_version == 1:
request, prepped_request = self._PrepareV1Request(
method_metadata.name, args=args)
elif self.__class__.api_version == 2:
request, prepped_request = self._PrepareV2Request(
method_metadata.name, args=args)
else:
raise ValueError("api_version may be only 1 or 2, not %d",
flags.FLAGS.api_version)
session = requests.Session()
response = session.send(prepped_request)
check_result = {
"url": replace(prepped_request.path_url),
"method": request.method
}
if request.data:
request_payload = self._ParseJSON(replace(request.data))
if request_payload:
check_result["request_payload"] = request_payload
if (method_metadata.result_type ==
api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE):
check_result["response"] = replace(utils.SmartUnicode(response.content))
else:
check_result["response"] = self._ParseJSON(replace(response.content))
if self.__class__.api_version == 1:
stripped_response = api_value_renderers.StripTypeInfo(
check_result["response"])
if stripped_response != check_result["response"]:
check_result["type_stripped_response"] = stripped_response
return check_result
class HttpApiV1RegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v1 protocol."""
connection_type = "http_v1"
skip_legacy_dynamic_proto_tests = False
api_version = 1
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-docs-examples.json")
class HttpApiV2RegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v2 protocol."""
connection_type = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
class HttpApiV2RelationalDBRegressionTestMixin(HttpApiRegressionTestMixinBase):
"""Test class for HTTP v2 protocol with Database.useForReads=True."""
read_from_relational_db = True
connection_type = "http_v2_rel_db"
use_golden_files_of = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsEnabled(self):
if not getattr(self, "aff4_only_test", False):
self.assertTrue(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
| 31.14486
| 79
| 0.722731
|
import json
import logging
import os
import threading
import portpicker
import requests
from google.protobuf import json_format
from grr import gui
from grr_api_client.connectors import http_connector
from grr.gui import api_auth_manager
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.gui import http_api
from grr.gui import wsgiapp_testlib
from grr.lib import flags
from grr.lib import utils
from grr.server import data_store
from grr.test_lib import test_lib
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")
_HTTP_ENDPOINTS = {}
_HTTP_ENDPOINTS_LOCK = threading.RLock()
class HttpApiRegressionTestMixinBase(object):
api_version = None
read_from_relational_db = False
_get_connector_lock = threading.RLock()
@staticmethod
def GetConnector(api_version):
if api_version not in [1, 2]:
raise ValueError("api_version may be 1 or 2 only")
with _HTTP_ENDPOINTS_LOCK:
if api_version not in _HTTP_ENDPOINTS:
port = portpicker.PickUnusedPort()
logging.info("Picked free AdminUI port %d.", port)
api_auth_manager.APIACLInit.InitApiAuthManager()
trd = wsgiapp_testlib.ServerThread(port)
trd.StartAndWaitUntilServing()
_HTTP_ENDPOINTS[api_version] = "http://localhost:%d" % port
return http_connector.HttpConnector(
api_endpoint=_HTTP_ENDPOINTS[api_version])
def setUp(self):
super(HttpApiRegressionTestMixinBase, self).setUp()
self.connector = self.GetConnector(self.__class__.api_version)
if (not getattr(self, "aff4_only_test", False) and
self.__class__.read_from_relational_db):
self.db_config_overrider = test_lib.ConfigOverrider({
"Database.useForReads": True
})
self.db_config_overrider.Start()
else:
self.db_config_overrider = None
def tearDown(self):
super(HttpApiRegressionTestMixinBase, self).tearDown()
if self.db_config_overrider:
self.db_config_overrider.Stop()
def _ParseJSON(self, json_str):
xssi_prefix = ")]}'\n"
if json_str.startswith(xssi_prefix):
json_str = json_str[len(xssi_prefix):]
return json.loads(json_str)
def _PrepareV1Request(self, method, args=None):
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
request.url = request.url.replace("/api/v2/", "/api/")
if args and request.data:
body_proto = args.__class__().AsPrimitiveProto()
json_format.Parse(request.data, body_proto)
body_args = args.__class__()
body_args.ParseFromString(body_proto.SerializeToString())
request.data = json.dumps(
api_value_renderers.StripTypeInfo(
api_value_renderers.RenderValue(body_args)),
cls=http_api.JSONEncoderWithRDFPrimitivesSupport)
prepped_request = request.prepare()
return request, prepped_request
def _PrepareV2Request(self, method, args=None):
args_proto = None
if args:
args_proto = args.AsPrimitiveProto()
request = self.connector.BuildRequest(method, args_proto)
prepped_request = request.prepare()
return request, prepped_request
def HandleCheck(self, method_metadata, args=None, replace=None):
if not replace:
raise ValueError("replace can't be None")
if self.__class__.api_version == 1:
request, prepped_request = self._PrepareV1Request(
method_metadata.name, args=args)
elif self.__class__.api_version == 2:
request, prepped_request = self._PrepareV2Request(
method_metadata.name, args=args)
else:
raise ValueError("api_version may be only 1 or 2, not %d",
flags.FLAGS.api_version)
session = requests.Session()
response = session.send(prepped_request)
check_result = {
"url": replace(prepped_request.path_url),
"method": request.method
}
if request.data:
request_payload = self._ParseJSON(replace(request.data))
if request_payload:
check_result["request_payload"] = request_payload
if (method_metadata.result_type ==
api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE):
check_result["response"] = replace(utils.SmartUnicode(response.content))
else:
check_result["response"] = self._ParseJSON(replace(response.content))
if self.__class__.api_version == 1:
stripped_response = api_value_renderers.StripTypeInfo(
check_result["response"])
if stripped_response != check_result["response"]:
check_result["type_stripped_response"] = stripped_response
return check_result
class HttpApiV1RegressionTestMixin(HttpApiRegressionTestMixinBase):
connection_type = "http_v1"
skip_legacy_dynamic_proto_tests = False
api_version = 1
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-docs-examples.json")
class HttpApiV2RegressionTestMixin(HttpApiRegressionTestMixinBase):
connection_type = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsDisabled(self):
self.assertFalse(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
class HttpApiV2RelationalDBRegressionTestMixin(HttpApiRegressionTestMixinBase):
read_from_relational_db = True
connection_type = "http_v2_rel_db"
use_golden_files_of = "http_v2"
skip_legacy_dynamic_proto_tests = True
api_version = 2
def testRelationalDBReadsEnabled(self):
if not getattr(self, "aff4_only_test", False):
self.assertTrue(data_store.RelationalDBReadEnabled())
@property
def output_file_name(self):
return os.path.join(DOCUMENT_ROOT,
"angular-components/docs/api-v2-docs-examples.json")
| true
| true
|
79070fbf711c1071af30c28295f6d1d93fd1595d
| 2,683
|
py
|
Python
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/numrange.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/numrange.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier/numrange.py
|
hectormartinez/rougexstem
|
32da9eab253cb88fc1882e59026e8b5b40900a25
|
[
"Apache-2.0"
] | null | null | null |
# Natural Language Toolkit - Range
# Represents a range of numbers; it is a mutable object that can be modified by include()
# Capable of performing operations on ranges
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier.exceptions import systemerror as se
DELTA = 0.000001
class Range:
def __init__(self, lower = 0, upper = 0, upper_includes_max=False):
"""
any number within this range should be greater than or equal to self.lower and
        less than (or less than or equal to, depending on whether the range includes its max) self.upper
"""
self.__delta_added = False
if upper < lower:
raise se.SystemError('Lower limit ' + str(lower) + ' cannot be greater than the Upper limit ' + str(upper) + ' in a range')
self.__uninitialized = False
if upper == lower == 0:
self.__uninitialized = True
self.lower, self.upper, self.__delta_added = lower, upper, False
if upper_includes_max:
self.upper += DELTA
self.__delta_added = True
def include(self, number):
if self.__uninitialized:
self.lower, self.upper = number, number
self.__uninitialized = False
if number >= self.upper:
self.__delta_added = True
self.upper = number + DELTA
elif number < self.lower:
self.lower = number
def includes(self, number):
return self.lower <= number and self.upper > number
def split(self, parts):
if self.lower == self.upper: return None
size = self.upper - self.lower
max_limit = self.upper
if self.__delta_added:
size -= DELTA
max_limit -= DELTA
each = size / parts
if each < DELTA:
raise se.SystemError('Splitting of range resulted in elements smaller than delta ' + str(DELTA) + '.')
lower, ranges = self.lower, []
for i in range(parts - 1):
ranges.append(Range(lower, lower + each))
lower += each
ranges.append(Range(lower, self.upper))
return ranges
def __eq__(self, other):
if other is None: return False
if self.__class__ != other.__class__ : return False
if self.lower == other.lower and self.upper == other.upper: return True
return False
def __hash__(self):
return hash(self.lower) + hash(self.upper)
def __str__(self):
return '[' + str(self.lower) + ',' + str(self.upper) + ']'
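
A short, hedged usage sketch of the class above (assuming `Range` is importable from this module; printed values are approximate because of the DELTA adjustment):

from nltk_contrib.classifier.numrange import Range

r = Range()              # starts uninitialized at [0, 0]
for value in [3.0, 7.5, 5.2]:
    r.include(value)     # grows the range to cover each observed number
print(r)                 # roughly [3.0,7.500001]; DELTA is added to the upper bound
print(r.includes(7.5))   # True: upper is exclusive, but 7.5 + DELTA covers it
halves = r.split(2)      # two contiguous sub-ranges covering the full span
print([str(h) for h in halves])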
| 38.328571
| 135
| 0.611256
|
from nltk_contrib.classifier.exceptions import systemerror as se
DELTA = 0.000001
class Range:
def __init__(self, lower = 0, upper = 0, upper_includes_max=False):
self.__delta_added = False
if upper < lower:
raise se.SystemError('Lower limit ' + str(lower) + ' cannot be greater than the Upper limit ' + str(upper) + ' in a range')
self.__uninitialized = False
if upper == lower == 0:
self.__uninitialized = True
self.lower, self.upper, self.__delta_added = lower, upper, False
if upper_includes_max:
self.upper += DELTA
self.__delta_added = True
def include(self, number):
if self.__uninitialized:
self.lower, self.upper = number, number
self.__uninitialized = False
if number >= self.upper:
self.__delta_added = True
self.upper = number + DELTA
elif number < self.lower:
self.lower = number
def includes(self, number):
return self.lower <= number and self.upper > number
def split(self, parts):
if self.lower == self.upper: return None
size = self.upper - self.lower
max_limit = self.upper
if self.__delta_added:
size -= DELTA
max_limit -= DELTA
each = size / parts
if each < DELTA:
raise se.SystemError('Splitting of range resulted in elements smaller than delta ' + str(DELTA) + '.')
lower, ranges = self.lower, []
for i in range(parts - 1):
ranges.append(Range(lower, lower + each))
lower += each
ranges.append(Range(lower, self.upper))
return ranges
def __eq__(self, other):
if other is None: return False
if self.__class__ != other.__class__ : return False
if self.lower == other.lower and self.upper == other.upper: return True
return False
def __hash__(self):
return hash(self.lower) + hash(self.upper)
def __str__(self):
return '[' + str(self.lower) + ',' + str(self.upper) + ']'
| true
| true
|
79071029fd42374964d12f513e9c510bdc7400eb
| 10,072
|
py
|
Python
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 522
|
2016-06-08T02:15:50.000Z
|
2022-03-02T05:30:36.000Z
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 48
|
2016-07-26T00:11:55.000Z
|
2022-02-23T13:36:33.000Z
|
tensorflow/python/kernel_tests/variable_ops_test.py
|
AlexChrisF/udacity
|
b7f85a74058fc63ccb7601c418450ab934ef5953
|
[
"Apache-2.0"
] | 108
|
2016-06-16T15:34:05.000Z
|
2022-03-12T13:23:11.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.variable_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return p.eval()
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
      # NOTE(touts): the GPU test should pass for all types, whether or not
      # the Variable op has an implementation for that type on GPU, as we
      # expect Variable and Assign to have GPU implementations for matching
      # tf types.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
def testAssignNoShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
def testAssignNoShapeNoValidateShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
final.eval()
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
final.eval()
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
# The variable and an op to increment it are on the GPU.
var = state_ops.variable_op([1], dtypes.float32)
state_ops.assign(var, [1.0]).eval()
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with ops.device("/cpu:0"):
# This mul op is pinned to the CPU, but reads the variable from the
# GPU. The test ensures that the dependency on 'increment' is still
# honored, i.e., the Send and Recv from GPU to CPU should take place
# only after the increment.
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], result.eval())
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
if __name__ == "__main__":
test.main()
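
For orientation, a minimal hedged sketch of the temporary-variable lifecycle these tests exercise, written against the same TF1-era internal ops the file imports; illustrative only.

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test

class TempVarSketch(test.TestCase):

  def testLifecycle(self):
    with self.test_session():
      # Create, assign, then destroy; destroying returns the final value.
      var = gen_state_ops._temporary_variable(
          [1, 2], dtypes.float32, var_name="sketch")
      var = state_ops.assign(var, [[1.0, 2.0]])
      final = gen_state_ops._destroy_temporary_variable(
          var, var_name="sketch")
      self.assertAllClose([[1.0, 2.0]], final.eval())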
| 41.110204
| 80
| 0.700655
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_NP_TO_TF = {
np.float32: dtypes.float32,
np.float64: dtypes.float64,
np.int32: dtypes.int32,
np.int64: dtypes.int64,
}
class VariableOpTest(test.TestCase):
def _initFetch(self, x, tftype, use_gpu=None):
with self.test_session(use_gpu=use_gpu):
p = state_ops.variable_op(x.shape, tftype)
op = state_ops.assign(p, x)
op.op.run()
return p.eval()
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
self.setUp()
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testset_shape(self):
p = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual([1, 2], p.get_shape())
p = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), p.get_shape())
def testAssign(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32)
self.assertShapeEqual(value, var)
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value)
self.assertShapeEqual(value, assigned)
def testAssignNoVarShapeNoValidateShape(self):
value = np.array([[42.0, 43.0]])
var = state_ops.variable_op(value.shape, dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertShapeEqual(value, assigned)
def _NewShapelessTensor(self):
tensor = array_ops.placeholder(dtypes.float32)
self.assertEqual(tensor_shape.unknown_shape(), tensor.get_shape())
return tensor
def testAssignNoValueShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
assigned = state_ops.assign(var, value)
self.assertEqual(shape, var.get_shape())
self.assertEqual(shape, assigned.get_shape())
def testAssignNoValueShapeNoValidateShape(self):
value = self._NewShapelessTensor()
shape = [1, 2]
var = state_ops.variable_op(shape, dtypes.float32)
self.assertEqual(shape, var.get_shape())
assigned = state_ops.assign(var, value, validate_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), assigned.get_shape())
def testAssignNoShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(tensor_shape.unknown_shape(),
state_ops.assign(var, value).get_shape())
def testAssignNoShapeNoValidateShape(self):
with self.test_session():
value = self._NewShapelessTensor()
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
self.assertEqual(tensor_shape.unknown_shape(), var.get_shape())
self.assertEqual(
tensor_shape.unknown_shape(),
state_ops.assign(
var, value, validate_shape=False).get_shape())
def testAssignUpdate(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoVarShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, [[2.0, 3.0]])
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, [[12.0, 13.0]])
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoValueShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual([1, 2], added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual([1, 2], subbed.get_shape())
def testAssignUpdateNoShape(self):
var = state_ops.variable_op([1, 2], dtypes.float32, set_shape=False)
added = state_ops.assign_add(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
subbed = state_ops.assign_sub(var, self._NewShapelessTensor())
self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
def testTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="foo")
var = state_ops.assign(var, [[4.0, 5.0]])
var = state_ops.assign_add(var, [[6.0, 7.0]])
final = gen_state_ops._destroy_temporary_variable(var, var_name="foo")
self.assertAllClose([[10.0, 12.0]], final.eval())
def testDestroyNonexistentTemporaryVariable(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
final = gen_state_ops._destroy_temporary_variable(var, var_name="bad")
with self.assertRaises(errors.NotFoundError):
final.eval()
def testDuplicateTemporaryVariable(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var1 = state_ops.assign(var1, [[1.0, 2.0]])
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="dup")
var2 = state_ops.assign(var2, [[3.0, 4.0]])
final = var1 + var2
with self.assertRaises(errors.AlreadyExistsError):
final.eval()
def testDestroyTemporaryVariableTwice(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable([1, 2], dtypes.float32)
val1 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
val2 = gen_state_ops._destroy_temporary_variable(var, var_name="dup")
final = val1 + val2
with self.assertRaises(errors.NotFoundError):
final.eval()
def testTemporaryVariableNoLeak(self):
with self.test_session(use_gpu=True):
var = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="bar")
final = array_ops.identity(var)
final.eval()
def testTwoTemporaryVariablesNoLeaks(self):
with self.test_session(use_gpu=True):
var1 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var1")
var2 = gen_state_ops._temporary_variable(
[1, 2], dtypes.float32, var_name="var2")
final = var1 + var2
final.eval()
def testAssignDependencyAcrossDevices(self):
with self.test_session(use_gpu=True):
var = state_ops.variable_op([1], dtypes.float32)
state_ops.assign(var, [1.0]).eval()
increment = state_ops.assign_add(var, [1.0])
with ops.control_dependencies([increment]):
with ops.device("/cpu:0"):
result = math_ops.multiply(var, var)
self.assertAllClose([4.0], result.eval())
def testIsVariableInitialized(self):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
v0 = state_ops.variable_op([1, 2], dtypes.float32)
self.assertEqual(False, variables.is_variable_initialized(v0).eval())
state_ops.assign(v0, [[2.0, 3.0]]).eval()
self.assertEqual(True, variables.is_variable_initialized(v0).eval())
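# Summary sketch, not part of the original test file, of the assign() shape rule the
# tests above exercise; it uses only modules this file already imports, and the
# helper name below is illustrative.
def _assigned_shape(validate_shape):
    var = state_ops.variable_op([1, 2], dtypes.float32)
    value = array_ops.placeholder(dtypes.float32)  # no static shape, like _NewShapelessTensor
    return state_ops.assign(var, value, validate_shape=validate_shape).get_shape()
# With validate_shape=True the variable's [1, 2] shape is kept; with False the result
# has tensor_shape.unknown_shape(), matching the assertions above.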
if __name__ == "__main__":
test.main()
| true
| true
|
790710a4696737e320d90c8c3b766f346cca7bef
| 2,133
|
py
|
Python
|
bot.py
|
sagol/umorilibot
|
89e4bdc9771c21326768171099ee9872dc40b194
|
[
"MIT"
] | 1
|
2021-02-19T11:13:24.000Z
|
2021-02-19T11:13:24.000Z
|
bot.py
|
sagol/umorilibot
|
89e4bdc9771c21326768171099ee9872dc40b194
|
[
"MIT"
] | null | null | null |
bot.py
|
sagol/umorilibot
|
89e4bdc9771c21326768171099ee9872dc40b194
|
[
"MIT"
] | null | null | null |
from sources import Sources
from stories import Stories
class Bot():
def __init__(self, config):
self.url = config.get_url()
self.sources = None
self.stories = None
def load(self):
self.sources = Sources(self.url)
self.stories = Stories(self.sources)
return self.stories.load()
def start(self, url):
        message = 'Bot for the site {0}'.format(url)
return message
def help(self):
message = "/get - читать истории из: \n\t{0}\n"\
"/random - случайные истории\n"\
"/stop - прервать диалог с ботом".format(
'\n\t'.join(['{0}'.format(y) for (x,y) in self.stories.get_description().items()]))
return message
def random(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites, random=True)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get_sources_sites(self):
sites = set()
for sites_list in self.sources.get():
for site in sites_list:
sites.add(site.get('site'))
return list(sites)
def get_sources_names(self, site):
names = set()
for sites_list in self.sources.get():
for s in sites_list:
if s.get('site') == site:
names.add((s.get('name'), s.get('desc')))
return list(names)
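# Usage sketch, not part of the original file: Bot only ever calls config.get_url(),
# so any object with that method works. FakeConfig is an illustrative stand-in, and
# only start() is exercised because load() needs the real sources/stories backends.
class FakeConfig:
    def __init__(self, url):
        self._url = url
    def get_url(self):
        return self._url
bot = Bot(FakeConfig("https://example.com"))
print(bot.start("https://example.com"))  # -> Bot for the site https://example.com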
| 34.403226
| 95
| 0.547586
|
from sources import Sources
from stories import Stories
class Bot():
def __init__(self, config):
self.url = config.get_url()
self.sources = None
self.stories = None
def load(self):
self.sources = Sources(self.url)
self.stories = Stories(self.sources)
return self.stories.load()
def start(self, url):
        message = 'Bot for the site {0}'.format(url)
return message
def help(self):
message = "/get - читать истории из: \n\t{0}\n"\
"/random - случайные истории\n"\
"/stop - прервать диалог с ботом".format(
'\n\t'.join(['{0}'.format(y) for (x,y) in self.stories.get_description().items()]))
return message
def random(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites, random=True)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get(self, num=None, site_names=None):
if site_names is None:
site_names = list(self.stories.get_names().keys())
sites = list(self.stories.get_names().values())
messages = []
stories = self.stories.get(num=num, site_names=site_names,
sites=sites)
for s in stories:
messages.append(s.get().get('story'))
return messages
def get_sources_sites(self):
sites = set()
for sites_list in self.sources.get():
for site in sites_list:
sites.add(site.get('site'))
return list(sites)
def get_sources_names(self, site):
names = set()
for sites_list in self.sources.get():
for s in sites_list:
if s.get('site') == site:
names.add((s.get('name'), s.get('desc')))
return list(names)
| true
| true
|
79071170e9dbb393696a52dfc7f26f101793ac87
| 165
|
py
|
Python
|
plugins/data/gan/digitsDataPluginGan/__init__.py
|
wills2133/digits-ssd
|
addf2fda32291a02a7c602b9d58d37ca71afe79d
|
[
"BSD-3-Clause"
] | 4,552
|
2015-03-17T17:24:11.000Z
|
2022-03-27T04:07:58.000Z
|
plugins/data/gan/digitsDataPluginGan/__init__.py
|
wills2133/digits-ssd
|
addf2fda32291a02a7c602b9d58d37ca71afe79d
|
[
"BSD-3-Clause"
] | 1,994
|
2015-03-17T21:46:44.000Z
|
2022-03-19T18:20:29.000Z
|
plugins/data/gan/digitsDataPluginGan/__init__.py
|
wills2133/digits-ssd
|
addf2fda32291a02a7c602b9d58d37ca71afe79d
|
[
"BSD-3-Clause"
] | 1,791
|
2015-03-17T17:51:05.000Z
|
2022-03-08T13:44:40.000Z
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| 23.571429
| 63
| 0.787879
|
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| true
| true
|
7907119ded5016468228022e0aeb09611a106f15
| 2,231
|
py
|
Python
|
Python/find-k-pairs-with-smallest-sums.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/find-k-pairs-with-smallest-sums.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/find-k-pairs-with-smallest-sums.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(k * log(min(n, m, k))), where n is the size of num1, and m is the size of num2.
# Space: O(min(n, m, k))
# You are given two integer arrays nums1
# and nums2 sorted in ascending order and an integer k.
#
# Define a pair (u,v) which consists of one element
# from the first array and one element from the second array.
#
# Find the k pairs (u1,v1),(u2,v2) ...(uk,vk) with the smallest sums.
#
# Example 1:
# Given nums1 = [1,7,11], nums2 = [2,4,6], k = 3
#
# Return: [1,2],[1,4],[1,6]
#
# The first 3 pairs are returned from the sequence:
# [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
# Example 2:
# Given nums1 = [1,1,2], nums2 = [1,2,3], k = 2
#
# Return: [1,1],[1,1]
#
# The first 2 pairs are returned from the sequence:
# [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
# Example 3:
# Given nums1 = [1,2], nums2 = [3], k = 3
#
# Return: [1,3],[2,3]
#
# All possible pairs are returned from the sequence:
# [1,3],[2,3]
from heapq import heappush, heappop
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
pairs = []
if len(nums1) > len(nums2):
tmp = self.kSmallestPairs(nums2, nums1, k)
for pair in tmp:
pairs.append([pair[1], pair[0]])
return pairs
min_heap = []
def push(i, j):
if i < len(nums1) and j < len(nums2):
heappush(min_heap, [nums1[i] + nums2[j], i, j])
push(0, 0)
while min_heap and len(pairs) < k:
_, i, j = heappop(min_heap)
pairs.append([nums1[i], nums2[j]])
push(i, j + 1)
if j == 0:
push(i + 1, 0) # at most queue min(n, m) space
return pairs
# time: O(mn * log k)
# space: O(k)
from heapq import nsmallest
from itertools import product
class Solution2(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
return nsmallest(k, product(nums1, nums2), key=sum)
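# Usage sketch, not part of the original file, exercising Example 1 from the problem
# statement above. Note the return-type difference: Solution yields [u, v] lists,
# while Solution2 yields the (u, v) tuples produced by itertools.product.
if __name__ == "__main__":
    nums1, nums2, k = [1, 7, 11], [2, 4, 6], 3
    assert Solution().kSmallestPairs(nums1, nums2, k) == [[1, 2], [1, 4], [1, 6]]
    assert Solution2().kSmallestPairs(nums1, nums2, k) == [(1, 2), (1, 4), (1, 6)]
    print("Example 1 OK")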
| 27.54321
| 90
| 0.53922
|
from heapq import heappush, heappop
class Solution(object):
def kSmallestPairs(self, nums1, nums2, k):
pairs = []
if len(nums1) > len(nums2):
tmp = self.kSmallestPairs(nums2, nums1, k)
for pair in tmp:
pairs.append([pair[1], pair[0]])
return pairs
min_heap = []
def push(i, j):
if i < len(nums1) and j < len(nums2):
heappush(min_heap, [nums1[i] + nums2[j], i, j])
push(0, 0)
while min_heap and len(pairs) < k:
_, i, j = heappop(min_heap)
pairs.append([nums1[i], nums2[j]])
push(i, j + 1)
if j == 0:
push(i + 1, 0)
return pairs
from heapq import nsmallest
from itertools import product
class Solution2(object):
def kSmallestPairs(self, nums1, nums2, k):
return nsmallest(k, product(nums1, nums2), key=sum)
| true
| true
|
790711f44d3bd07658ab5643189d3e8a06e23288
| 5,910
|
py
|
Python
|
tests/python/pants_test/backend/graph_info/tasks/test_list_targets.py
|
rahuliyer95/pants
|
50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/graph_info/tasks/test_list_targets.py
|
rahuliyer95/pants
|
50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/graph_info/tasks/test_list_targets.py
|
rahuliyer95/pants
|
50ee5cc8bd9ab40ad13c3c28ccbc4e7f189292ec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
import pytest
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.rules.core import list_targets_old
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class ListTargetsTest(GoalRuleTestBase):
goal_cls = list_targets_old.List
@classmethod
def alias_groups(cls):
return BuildFileAliases(
targets={
"target": Target,
"java_library": JavaLibrary,
"python_library": PythonLibrary,
},
objects={
"pants": lambda x: x,
"artifact": Artifact,
"scala_artifact": ScalaArtifact,
"public": Repository(
name="public", url="http://maven.example.com", push_db_basedir="/tmp"
),
},
)
@classmethod
def rules(cls):
return super().rules() + list_targets_old.rules()
def setUp(self) -> None:
super().setUp()
# Setup a BUILD tree for various list tests
class Lib:
def __init__(self, name: str, provides: bool = False) -> None:
self.name = name
self.provides = (
dedent(
f"""
artifact(
org='com.example',
name='{name}',
repo=public
)
"""
).strip()
if provides
else "None"
)
def create_library(path: str, *libs: Lib) -> None:
libs = libs or (Lib(os.path.basename(os.path.dirname(self.build_path(path)))),)
for lib in libs:
target = f"java_library(name='{lib.name}', provides={lib.provides}, sources=[])\n"
self.add_to_build_file(path, target)
create_library("a")
create_library("a/b", Lib("b", provides=True))
create_library("a/b/c", Lib("c"), Lib("c2", provides=True), Lib("c3"))
create_library("a/b/d")
create_library("a/b/e", Lib("e1"))
self.add_to_build_file(
"f",
dedent(
'''
target(
name='alias',
dependencies=[
'a/b/c:c3',
'a/b/d:d',
],
description = """
Exercises alias resolution.
Further description.
""",
)
'''
),
)
def test_list_all_empty(self):
# NB: Also renders a warning to stderr, which is challenging to detect here but confirmed in:
# tests/python/pants_test/engine/legacy/test_list_integration.py
self.assert_console_output(args=[])
def test_list_path(self):
self.assert_console_output("a/b:b", args=["a/b"])
def test_list_siblings(self):
self.assert_console_output("a/b:b", args=["a/b:"])
self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/:"])
def test_list_descendants(self):
self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/::"])
self.assert_console_output(
"a/b:b", "a/b/c:c", "a/b/c:c2", "a/b/c:c3", "a/b/d:d", "a/b/e:e1", args=["a/b::"]
)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8678")
def test_list_all(self):
self.assert_entries(
"\n",
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["::"],
)
self.assert_entries(
", ",
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["--sep=, ", "::"],
)
self.assert_console_output(
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["::"],
)
def test_list_provides(self):
self.assert_console_output(
"a/b:b com.example#b", "a/b/c:c2 com.example#c2", args=["--provides", "::"]
)
def test_list_provides_customcols(self):
self.assert_console_output(
"/tmp a/b:b http://maven.example.com public com.example#b",
"/tmp a/b/c:c2 http://maven.example.com public com.example#c2",
args=[
"--provides",
"--provides-columns=push_db_basedir,address,repo_url,repo_name,artifact_id",
"::",
],
)
def test_list_dedups(self):
self.assert_console_output("a/b/c:c3", "a/b/d:d", args=["a/b/d/::", "a/b/c:c3", "a/b/d:d"])
def test_list_documented(self):
self.assert_console_output(
# Confirm empty listing
args=["--documented", "a/b"],
)
self.assert_console_output_ordered(
"f:alias",
" Exercises alias resolution.",
" Further description.",
args=["--documented", "::"],
)
| 31.774194
| 101
| 0.494585
|
import os
from textwrap import dedent
import pytest
from pants.backend.jvm.artifact import Artifact
from pants.backend.jvm.repository import Repository
from pants.backend.jvm.scala_artifact import ScalaArtifact
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
from pants.rules.core import list_targets_old
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class ListTargetsTest(GoalRuleTestBase):
goal_cls = list_targets_old.List
@classmethod
def alias_groups(cls):
return BuildFileAliases(
targets={
"target": Target,
"java_library": JavaLibrary,
"python_library": PythonLibrary,
},
objects={
"pants": lambda x: x,
"artifact": Artifact,
"scala_artifact": ScalaArtifact,
"public": Repository(
name="public", url="http://maven.example.com", push_db_basedir="/tmp"
),
},
)
@classmethod
def rules(cls):
return super().rules() + list_targets_old.rules()
def setUp(self) -> None:
super().setUp()
class Lib:
def __init__(self, name: str, provides: bool = False) -> None:
self.name = name
self.provides = (
dedent(
f"""
artifact(
org='com.example',
name='{name}',
repo=public
)
"""
).strip()
if provides
else "None"
)
def create_library(path: str, *libs: Lib) -> None:
libs = libs or (Lib(os.path.basename(os.path.dirname(self.build_path(path)))),)
for lib in libs:
target = f"java_library(name='{lib.name}', provides={lib.provides}, sources=[])\n"
self.add_to_build_file(path, target)
create_library("a")
create_library("a/b", Lib("b", provides=True))
create_library("a/b/c", Lib("c"), Lib("c2", provides=True), Lib("c3"))
create_library("a/b/d")
create_library("a/b/e", Lib("e1"))
self.add_to_build_file(
"f",
dedent(
'''
target(
name='alias',
dependencies=[
'a/b/c:c3',
'a/b/d:d',
],
description = """
Exercises alias resolution.
Further description.
""",
)
'''
),
)
def test_list_all_empty(self):
self.assert_console_output(args=[])
def test_list_path(self):
self.assert_console_output("a/b:b", args=["a/b"])
def test_list_siblings(self):
self.assert_console_output("a/b:b", args=["a/b:"])
self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/:"])
def test_list_descendants(self):
self.assert_console_output("a/b/c:c", "a/b/c:c2", "a/b/c:c3", args=["a/b/c/::"])
self.assert_console_output(
"a/b:b", "a/b/c:c", "a/b/c:c2", "a/b/c:c3", "a/b/d:d", "a/b/e:e1", args=["a/b::"]
)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8678")
def test_list_all(self):
self.assert_entries(
"\n",
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["::"],
)
self.assert_entries(
", ",
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["--sep=, ", "::"],
)
self.assert_console_output(
"a:a",
"a/b:b",
"a/b/c:c",
"a/b/c:c2",
"a/b/c:c3",
"a/b/d:d",
"a/b/e:e1",
"f:alias",
args=["::"],
)
def test_list_provides(self):
self.assert_console_output(
"a/b:b com.example#b", "a/b/c:c2 com.example#c2", args=["--provides", "::"]
)
def test_list_provides_customcols(self):
self.assert_console_output(
"/tmp a/b:b http://maven.example.com public com.example#b",
"/tmp a/b/c:c2 http://maven.example.com public com.example#c2",
args=[
"--provides",
"--provides-columns=push_db_basedir,address,repo_url,repo_name,artifact_id",
"::",
],
)
def test_list_dedups(self):
self.assert_console_output("a/b/c:c3", "a/b/d:d", args=["a/b/d/::", "a/b/c:c3", "a/b/d:d"])
def test_list_documented(self):
self.assert_console_output(
args=["--documented", "a/b"],
)
self.assert_console_output_ordered(
"f:alias",
" Exercises alias resolution.",
" Further description.",
args=["--documented", "::"],
)
| true
| true
|
7907135da0f963d80a425f52deab8e3b6f5b62c0
| 15,763
|
py
|
Python
|
osxphotos/cli/about.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | null | null | null |
osxphotos/cli/about.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | null | null | null |
osxphotos/cli/about.py
|
oPromessa/osxphotos
|
0d7e324f0262093727147b9f22ed275e962e8725
|
[
"MIT"
] | null | null | null |
"""about command for osxphotos CLI"""
from textwrap import dedent
import click
from osxphotos._constants import OSXPHOTOS_URL
from osxphotos._version import __version__
MIT_LICENSE = """
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
APACHE_2_0_LICENSE = """
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
BSD_3_CLAUSE_LICENSE = """
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be
used to endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ISC_LICENSE = """
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
LICENSE = dedent(
f"""
osxphotos is copyright (c) 2019-2022 by Rhet Turnbull and is licensed under the MIT license:
{MIT_LICENSE}
osxphotos uses the following 3rd party software licensed under the BSD-3-Clause License:
Click (Copyright 2014 Pallets), ptpython (Copyright (c) 2015, Jonathan Slenders)
{BSD_3_CLAUSE_LICENSE}
osxphotos uses the following 3rd party software licensed under the Apache 2.0 License:
tenacity (Copyright Julien Danjou)
{APACHE_2_0_LICENSE}
osxphotos uses the following 3rd party software licensed under the ISC License:
xdg (Copyright 2016-2021 Scott Stevenson <scott@stevenson.io>)
{ISC_LICENSE}
"""
)
@click.command(name="about")
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
"""Print information about osxphotos including license."""
click.echo_via_pager(
f"osxphotos, version {__version__}\n\n"
f"Source code available at: {OSXPHOTOS_URL}\n"
f"{LICENSE}"
)
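# Wiring sketch, not part of the original file: how a Click group could register this
# command. `cli` is an illustrative group name, not osxphotos's actual entry point.
@click.group()
def cli():
    pass
cli.add_command(about)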
| 50.848387
| 104
| 0.748588
|
from textwrap import dedent
import click
from osxphotos._constants import OSXPHOTOS_URL
from osxphotos._version import __version__
MIT_LICENSE = """
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
APACHE_2_0_LICENSE = """
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
BSD_3_CLAUSE_LICENSE = """
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be
used to endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ISC_LICENSE = """
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
LICENSE = dedent(
f"""
osxphotos is copyright (c) 2019-2022 by Rhet Turnbull and is licensed under the MIT license:
{MIT_LICENSE}
osxphotos uses the following 3rd party software licensed under the BSD-3-Clause License:
Click (Copyright 2014 Pallets), ptpython (Copyright (c) 2015, Jonathan Slenders)
{BSD_3_CLAUSE_LICENSE}
osxphotos uses the following 3rd party software licensed under the Apache 2.0 License:
tenacity (Copyright Julien Danjou)
{APACHE_2_0_LICENSE}
osxphotos uses the following 3rd party software licensed under the ISC License:
xdg (Copyright 2016-2021 Scott Stevenson <scott@stevenson.io>)
{ISC_LICENSE}
"""
)
@click.command(name="about")
@click.pass_obj
@click.pass_context
def about(ctx, cli_obj):
click.echo_via_pager(
f"osxphotos, version {__version__}\n\n"
f"Source code available at: {OSXPHOTOS_URL}\n"
f"{LICENSE}"
)
| true
| true
|
7907144cfa4b569479c86296f45c647d9b00f6ab
| 3,590
|
py
|
Python
|
cinfo/triager.py
|
EliadCohen/cinfo
|
70acd2c4c47aee4dc12b0a9e0e6cdfe2b6d902e9
|
[
"Apache-2.0"
] | null | null | null |
cinfo/triager.py
|
EliadCohen/cinfo
|
70acd2c4c47aee4dc12b0a9e0e6cdfe2b6d902e9
|
[
"Apache-2.0"
] | null | null | null |
cinfo/triager.py
|
EliadCohen/cinfo
|
70acd2c4c47aee4dc12b0a9e0e6cdfe2b6d902e9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
import importlib
import logging
import os
import sys
from cinfo.config import Config
from cinfo.exceptions import usage as usage_exc
LOG = logging.getLogger(__name__)
class Triager(object):
def __init__(self, config_file, source_name=None, target_name=None):
self.config_file = config_file
self.source_name = source_name
self.target_name = target_name
self.workspace = os.path.join(os.path.expanduser('~'), '.cinfo')
def load_config(self):
self.config = Config(file=self.config_file)
self.config.load()
self.sources = self.config.data['sources']
self.targets = self.config.data['targets']
def pull(self):
LOG.info("{}: {}".format(
crayons.yellow("pulling information from the source"),
self.source_name))
try:
driver = getattr(importlib.import_module(
"cinfo.drivers.{}".format(self.source['type'])),
self.source['type'].capitalize())()
except KeyError:
LOG.error("{}: {}...exiting".format(
crayons.red("No such source"), self.source))
sys.exit(2)
self.data = driver.pull(self.source['url'],
jobs=self.source['jobs'])
if not self.data:
LOG.warning("{}".format(crayons.red(
"I've pulled nothing! outrageous!")))
self.write(self.data)
def publish(self):
LOG.info("{}: {}".format(
crayons.yellow("publishing data to target"),
self.target['url']))
try:
publisher = getattr(importlib.import_module(
"cinfo.drivers.{}".format(self.target['type'])),
self.target['type'].capitalize())()
except KeyError:
LOG.error("{}: {}...exiting".format(
crayons.red("No such target"), self.target))
sys.exit(2)
publisher.publish(self.data)
def write(self, data):
pass
def validate(self):
if len(self.sources.keys()) > 1 and not self.source_name:
LOG.error(usage_exc.multiple_options("source"))
sys.exit(2)
elif not self.source_name:
self.source = list(self.sources.values())[0]
else:
try:
self.source = self.sources[self.source_name]
except KeyError:
LOG.error(usage_exc.missing_value(
self.source_name, [key for key in self.sources.keys()]))
sys.exit(2)
        if len(self.targets.keys()) > 1 and not self.target_name:
LOG.error(usage_exc.multiple_options("target"))
sys.exit(2)
elif not self.target_name:
self.target = list(self.targets.values())[0]
else:
self.target = self.targets[self.target_name]
def run(self):
self.load_config()
self.validate()
self.pull()
self.publish()
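# Driver-contract sketch, not part of the original repo. pull() and publish() above
# import cinfo.drivers.<type> and instantiate a class named <Type>, so a driver module
# has to look roughly like this. The "jenkins" driver name and the job/status shapes
# are illustrative assumptions, not actual cinfo code.
class Jenkins(object):
    def pull(self, url, jobs=None):
        # Gather per-job information from `url` and return it as plain data.
        return {job: "UNKNOWN" for job in (jobs or [])}
    def publish(self, data):
        # Present previously pulled data; a real driver might render a page instead.
        for job, status in (data or {}).items():
            print("{}: {}".format(job, status))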
| 34.854369
| 78
| 0.591086
|
import crayons
import importlib
import logging
import os
import sys
from cinfo.config import Config
from cinfo.exceptions import usage as usage_exc
LOG = logging.getLogger(__name__)
class Triager(object):
def __init__(self, config_file, source_name=None, target_name=None):
self.config_file = config_file
self.source_name = source_name
self.target_name = target_name
self.workspace = os.path.join(os.path.expanduser('~'), '.cinfo')
def load_config(self):
self.config = Config(file=self.config_file)
self.config.load()
self.sources = self.config.data['sources']
self.targets = self.config.data['targets']
def pull(self):
LOG.info("{}: {}".format(
crayons.yellow("pulling information from the source"),
self.source_name))
try:
driver = getattr(importlib.import_module(
"cinfo.drivers.{}".format(self.source['type'])),
self.source['type'].capitalize())()
except KeyError:
LOG.error("{}: {}...exiting".format(
crayons.red("No such source"), self.source))
sys.exit(2)
self.data = driver.pull(self.source['url'],
jobs=self.source['jobs'])
if not self.data:
LOG.warning("{}".format(crayons.red(
"I've pulled nothing! outrageous!")))
self.write(self.data)
def publish(self):
LOG.info("{}: {}".format(
crayons.yellow("publishing data to target"),
self.target['url']))
try:
publisher = getattr(importlib.import_module(
"cinfo.drivers.{}".format(self.target['type'])),
self.target['type'].capitalize())()
except KeyError:
LOG.error("{}: {}...exiting".format(
crayons.red("No such target"), self.target))
sys.exit(2)
publisher.publish(self.data)
def write(self, data):
pass
def validate(self):
if len(self.sources.keys()) > 1 and not self.source_name:
LOG.error(usage_exc.multiple_options("source"))
sys.exit(2)
elif not self.source_name:
self.source = list(self.sources.values())[0]
else:
try:
self.source = self.sources[self.source_name]
except KeyError:
LOG.error(usage_exc.missing_value(
self.source_name, [key for key in self.sources.keys()]))
sys.exit(2)
        if len(self.targets.keys()) > 1 and not self.target_name:
LOG.error(usage_exc.multiple_options("target"))
sys.exit(2)
elif not self.target_name:
self.target = list(self.targets.values())[0]
else:
self.target = self.targets[self.target_name]
def run(self):
self.load_config()
self.validate()
self.pull()
self.publish()
| true
| true
|
7907153f7348c34677e0563a3a2828ed9d361e52
| 282
|
py
|
Python
|
chat/urls.py
|
tawhidularefindcc/Django-Speech-to-text-Chat
|
51a3c531f99da829c7f59310ed9947d5f535c7ba
|
[
"MIT"
] | 18
|
2020-01-31T11:42:46.000Z
|
2022-02-12T17:22:36.000Z
|
chat/urls.py
|
tawhidularefindcc/Django-Speech-to-text-Chat
|
51a3c531f99da829c7f59310ed9947d5f535c7ba
|
[
"MIT"
] | null | null | null |
chat/urls.py
|
tawhidularefindcc/Django-Speech-to-text-Chat
|
51a3c531f99da829c7f59310ed9947d5f535c7ba
|
[
"MIT"
] | 10
|
2020-02-09T01:06:57.000Z
|
2022-03-01T02:05:42.000Z
|
from django.urls import path
from . import views
app_name = "chat"
urlpatterns = [
path('', views.home, name='home'),
path('post/', views.post, name='post'),
path('messages/', views.messages, name='messages'),
    path('upload/', views.upload, name='upload'),
]
| 23.5
| 55
| 0.641844
|
from django.urls import path
from . import views
app_name = "chat"
urlpatterns = [
path('', views.home, name='home'),
path('post/', views.post, name='post'),
path('messages/', views.messages, name='messages'),
    path('upload/', views.upload, name='upload'),
]
| true
| true
|
7907162d4b8b3873075fa92023988b32715a97db
| 62
|
py
|
Python
|
test/test_132_pattern.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
test/test_132_pattern.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
test/test_132_pattern.py
|
spencercjh/sync-leetcode-today-problem-python3-example
|
4957e5eadb697334741df0fc297bec2edaa9e2ab
|
[
"Apache-2.0"
] | null | null | null |
solution = Solution()  # the original template's "132Pattern" is not a valid Python identifier
assert X == solution.find132pattern( )  # X and the empty argument are unfilled template placeholders
| 31
| 38
| 0.758065
|
solution = Solution()
assert X == solution.find132pattern( )
| false
| true
|
790716691a1cc9ac048c175ccfa4a0605f8cf294
| 1,328
|
py
|
Python
|
numba/cuda/tests/cudapy/test_deprecation.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
numba/cuda/tests/cudapy/test_deprecation.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2019-08-29T21:03:09.000Z
|
2019-08-29T21:04:26.000Z
|
numba/cuda/tests/cudapy/test_deprecation.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
import warnings
from contextlib import contextmanager
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import cuda
from numba.core import types
from numba.cuda.testing import SerialMixin
import unittest
@skip_on_cudasim("Skipped on simulator")
class TestCudaDebugInfo(SerialMixin, TestCase):
"""Tests features that will be deprecated
"""
@contextmanager
def assert_deprecation_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
def test_autotune(self):
@cuda.jit("(int32[:],)")
def foo(xs):
xs[0] = 1
with self.assert_deprecation_warning() as w:
foo.autotune
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert ".autotune" in str(w[-1].message)
with self.assert_deprecation_warning() as w:
foo.occupancy
assert len(w) == 2
assert issubclass(w[0].category, DeprecationWarning)
assert ".occupancy" in str(w[0].message)
assert issubclass(w[1].category, DeprecationWarning)
assert ".autotune" in str(w[1].message)
if __name__ == '__main__':
unittest.main()
| 30.181818
| 65
| 0.653614
|
import warnings
from contextlib import contextmanager
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import cuda
from numba.core import types
from numba.cuda.testing import SerialMixin
import unittest
@skip_on_cudasim("Skipped on simulator")
class TestCudaDebugInfo(SerialMixin, TestCase):
@contextmanager
def assert_deprecation_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
def test_autotune(self):
@cuda.jit("(int32[:],)")
def foo(xs):
xs[0] = 1
with self.assert_deprecation_warning() as w:
foo.autotune
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert ".autotune" in str(w[-1].message)
with self.assert_deprecation_warning() as w:
foo.occupancy
assert len(w) == 2
assert issubclass(w[0].category, DeprecationWarning)
assert ".occupancy" in str(w[0].message)
assert issubclass(w[1].category, DeprecationWarning)
assert ".autotune" in str(w[1].message)
if __name__ == '__main__':
unittest.main()
| true
| true
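A hedged companion sketch to the record above: the manual catch_warnings helper can also be written with unittest's built-in assertWarnsRegex, which checks the warning category and message in one call. The fake old_api below is an assumption used only to keep the example self-contained:

import unittest
import warnings

def old_api():
    # stand-in for a deprecated attribute such as .autotune
    warnings.warn("old_api is deprecated", DeprecationWarning)

class TestDeprecationSketch(unittest.TestCase):
    def test_old_api_warns(self):
        # asserts both the category and the message text
        with self.assertWarnsRegex(DeprecationWarning, "old_api"):
            old_api()

if __name__ == '__main__':
    unittest.main()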
|
790716bf91b361f85620be2e04340fe297d1b360
| 7,516
|
py
|
Python
|
Server/checkin.py
|
varetic/HEU-Checkin-COVID-19
|
03507e60087125adc03e7b6e160d1b88128dae43
|
[
"MIT"
] | 6
|
2021-01-18T06:21:45.000Z
|
2021-02-01T08:24:04.000Z
|
Server/checkin.py
|
varetic/HEU-Checkin-COVID-19
|
03507e60087125adc03e7b6e160d1b88128dae43
|
[
"MIT"
] | null | null | null |
Server/checkin.py
|
varetic/HEU-Checkin-COVID-19
|
03507e60087125adc03e7b6e160d1b88128dae43
|
[
"MIT"
] | 1
|
2020-09-02T03:58:42.000Z
|
2020-09-02T03:58:42.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
平安行动自动打卡
请事先安装好 lxml 和 requests 模块
pip install lxml requests
然后修改 27-31 行为自己的数据,未使用的变量保持原样即可
如有需要请自行配置 149-171 行的 SMTP 发信或 174-177 行的 Server 酱微信提醒
Created on 2020-04-13 20:20
@author: ZhangJiawei & Liu Chongpeng & Liu Lu
"""
import requests
import lxml.html
import re
import json
import random
import time
import smtplib
import traceback
myid = "STUDENTID"
mypass = "PASSWORD"
mybound = "BOUNDFIELDS"
mydata = r'FORMDATA'
# mysckey = "SCKEY"
title = ""
msg = ""
proxies = {"http": None, "https": None}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "MESSAGE_TICKET=%7B%22times%22%3A0%7D; ",
"Host": "cas.hrbeu.edu.cn",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362"
}
def findStr(source, target):
return source.find(target) != -1
if __name__ == '__main__':
try:
        ## Log in to the campus network CAS authentication page
url_login = 'https://cas.hrbeu.edu.cn/cas/login?'
print("============================\n[debug] Begin to login ...")
sesh = requests.session()
req = sesh.get(url_login, proxies=proxies)
html_content = req.text
login_html = lxml.html.fromstring(html_content)
hidden_inputs = login_html.xpath( r'//div[@id="main"]//input[@type="hidden"]')
user_form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs}
user_form["username"] = myid
user_form["password"] = mypass
user_form["captcha"] = ''
user_form["submit"] = '登 录'
headers['Cookie'] = headers['Cookie'] + req.headers['Set-cookie']
req.url = f'https://cas.hrbeu.edu.cn/cas/login'
response302 = sesh.post(req.url, data=user_form, headers=headers, proxies=proxies)
        ## Open the Ping An Action check-in page
jkgc_response = sesh.get( "http://jkgc.hrbeu.edu.cn/infoplus/form/JSXNYQSBtest/start", proxies=proxies)
headers['Accept'] = '*/*'
headers['Cookie'] = jkgc_response.request.headers['Cookie']
headers['Host'] = 'jkgc.hrbeu.edu.cn'
headers['Referer'] = jkgc_response.url
jkgc_html = lxml.html.fromstring(jkgc_response.text)
csrfToken = jkgc_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken = csrfToken.pop().attrib["content"]
jkgc_form = {
'idc': 'JSXNYQSBtest',
'release': '',
'csrfToken': csrfToken,
'formData': {
'_VAR_URL': jkgc_response.url,
'_VAR_URL_Attr': {}
}
}
jkgc_form['formData'] = json.dumps(jkgc_form['formData'])
jkgc_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/start'
response3 = sesh.post(jkgc_url, data=jkgc_form, headers=headers, proxies=proxies)
        ## Submit the Ping An Action form
form_url = json.loads(response3.text)['entities'][0]
form_response = sesh.get(form_url)
headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
headers['Referer'] = form_url
headers['X-Requested-With'] = 'XMLHttpRequest'
submit_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/doAction'
submit_html = lxml.html.fromstring(form_response.text)
csrfToken2 = submit_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken2 = csrfToken2.pop().attrib["content"]
submit_form = {
'actionId': '1',
            'boundFields': mybound,  # edit boundFields here
'csrfToken': csrfToken2,
            'formData': mydata,  # edit formData here
'lang': 'zh',
'nextUsers': '{}',
'rand': str(random.random() * 999),
'remark': '',
'stepId': re.match(r'.*form/(\d*?)/', form_response.url).group(1),
'timestamp': str(int(time.time()+0.5))
}
response_end = sesh.post(submit_url, data=submit_form, headers=headers, proxies=proxies)
resJson = json.loads(response_end.text)
        ## Form submitted; print the result
print('[debug] Form url: ', form_response.url)
print('[debug] Form Status: ', resJson['ecode'])
print('[debug] Form stJson: ', resJson)
        ## Build the title and message for the notification
if (resJson['errno'] == 0):
print('[info] Checkin succeed with jsoncode', resJson['ecode'])
            title = f'Check-in succeeded <{submit_form["stepId"]}>'
            msg = '\tForm URL: ' + form_response.url + '\n\n\tForm status: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\tFull response:' + response_end.text
else:
print('[error] Checkin error with jsoncode', resJson['ecode'])
            title = f'Check-in failed! Campus network error'
            msg = '\tForm URL: ' + form_response.url + '\n\n\tError info: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\tFull response:' + response_end.text
except:
print('\n[error] :.:.:.:.: Except return :.:.:.:.:')
err = traceback.format_exc()
print('[error] Python Error: \n', err)
        title = 'Check-in failed! Script error'
        msg = '\tScript traceback: \n\n\t' + err + '============================\n'
finally:
print(':.:.:.:.: Finally :.:.:.:.:')
        ## Send the notification email
        # from email.mime.text import MIMEText
        # from email.header import Header
        # mail_host = "smtp.qq.com" # SMTP server address
        # mail_user = "sender@example.com" # SMTP sender account
        # mail_pass = "emailpassword" # SMTP sender password
        # sender = 'sender@example.com' # sender address, i.e. the SMTP account
        # receivers = ['receiver@example.com'] # recipient addresses; use a list for several
        # message = MIMEText(msg, 'plain', 'utf-8')
        # message['From'] = Header("1@example.com", 'utf-8') # sender address, display only
        # message['To'] = Header("2@example.com", 'utf-8') # recipient address, display only
        # subject = title
        # message['Subject'] = Header(subject, 'utf-8')
        # try:
        #     smtpObj = smtplib.SMTP_SSL(mail_host) # SSL-encrypted sending, Python 3.7+
        #     smtpObj.connect(mail_host, 465) # SMTP-over-SSL port 465, Python 3.7+
        #     smtpObj.login(mail_user,mail_pass)
        #     smtpObj.sendmail(sender, receivers, message.as_string())
        #     print ("[info] Success: The email was sent successfully") # log output
        # except smtplib.SMTPException:
        #     print ("[error] Error: Can not send mail") # log output
        ## Or send a WeChat notification via ServerChan (Server 酱)
# wcurl = 'https://sc.ftqq.com/' + mysckey + '.send'
# wcdata = {'text': title, 'desp': msg}
# try:
# wcresult = requests.post(wcurl, wcdata)
# print('[info] Notification sended at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
# except:
# print('[error] Failed to send notification!')
print('[info] Task Finished at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
print('============================\n')
| 41.524862
| 149
| 0.562932
|
import requests
import lxml.html
import re
import json
import random
import time
import smtplib
import traceback
myid = "STUDENTID"
mypass = "PASSWORD"
mybound = "BOUNDFIELDS"
mydata = r'FORMDATA'
title = ""
msg = ""
proxies = {"http": None, "https": None}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Cookie": "MESSAGE_TICKET=%7B%22times%22%3A0%7D; ",
"Host": "cas.hrbeu.edu.cn",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362"
}
def findStr(source, target):
return source.find(target) != -1
if __name__ == '__main__':
try:
        url_login = 'https://cas.hrbeu.edu.cn/cas/login?'
print("============================\n[debug] Begin to login ...")
sesh = requests.session()
req = sesh.get(url_login, proxies=proxies)
html_content = req.text
login_html = lxml.html.fromstring(html_content)
hidden_inputs = login_html.xpath( r'//div[@id="main"]//input[@type="hidden"]')
user_form = {x.attrib["name"]: x.attrib["value"] for x in hidden_inputs}
user_form["username"] = myid
user_form["password"] = mypass
user_form["captcha"] = ''
user_form["submit"] = '登 录'
headers['Cookie'] = headers['Cookie'] + req.headers['Set-cookie']
req.url = f'https://cas.hrbeu.edu.cn/cas/login'
response302 = sesh.post(req.url, data=user_form, headers=headers, proxies=proxies)
        jkgc_response = sesh.get( "http://jkgc.hrbeu.edu.cn/infoplus/form/JSXNYQSBtest/start", proxies=proxies)
headers['Accept'] = '*/*'
headers['Cookie'] = jkgc_response.request.headers['Cookie']
headers['Host'] = 'jkgc.hrbeu.edu.cn'
headers['Referer'] = jkgc_response.url
jkgc_html = lxml.html.fromstring(jkgc_response.text)
csrfToken = jkgc_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken = csrfToken.pop().attrib["content"]
jkgc_form = {
'idc': 'JSXNYQSBtest',
'release': '',
'csrfToken': csrfToken,
'formData': {
'_VAR_URL': jkgc_response.url,
'_VAR_URL_Attr': {}
}
}
jkgc_form['formData'] = json.dumps(jkgc_form['formData'])
jkgc_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/start'
response3 = sesh.post(jkgc_url, data=jkgc_form, headers=headers, proxies=proxies)
        form_url = json.loads(response3.text)['entities'][0]
form_response = sesh.get(form_url)
headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
headers['Referer'] = form_url
headers['X-Requested-With'] = 'XMLHttpRequest'
submit_url = 'http://jkgc.hrbeu.edu.cn/infoplus/interface/doAction'
submit_html = lxml.html.fromstring(form_response.text)
csrfToken2 = submit_html.xpath(r'//meta[@itemscope="csrfToken"]')
csrfToken2 = csrfToken2.pop().attrib["content"]
submit_form = {
'actionId': '1',
'boundFields': mybound,
'csrfToken': csrfToken2,
'formData': mydata,
'lang': 'zh',
'nextUsers': '{}',
'rand': str(random.random() * 999),
'remark': '',
'stepId': re.match(r'.*form/(\d*?)/', form_response.url).group(1),
'timestamp': str(int(time.time()+0.5))
}
response_end = sesh.post(submit_url, data=submit_form, headers=headers, proxies=proxies)
resJson = json.loads(response_end.text)
        print('[debug] Form url: ', form_response.url)
print('[debug] Form Status: ', resJson['ecode'])
print('[debug] Form stJson: ', resJson)
        if (resJson['errno'] == 0):
print('[info] Checkin succeed with jsoncode', resJson['ecode'])
            title = f'Check-in succeeded <{submit_form["stepId"]}>'
            msg = '\tForm URL: ' + form_response.url + '\n\n\tForm status: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\tFull response:' + response_end.text
else:
print('[error] Checkin error with jsoncode', resJson['ecode'])
            title = f'Check-in failed! Campus network error'
            msg = '\tForm URL: ' + form_response.url + '\n\n\tError info: \n\t\terrno:' + str(resJson['errno']) + '\n\t\tecode:' + str(
                resJson['ecode']) + '\n\t\tentities:' + str(resJson['entities']) + '\n\n\n\tFull response:' + response_end.text
except:
print('\n[error] :.:.:.:.: Except return :.:.:.:.:')
err = traceback.format_exc()
print('[error] Python Error: \n', err)
        title = 'Check-in failed! Script error'
        msg = '\tScript traceback: \n\n\t' + err + '============================\n'
finally:
print(':.:.:.:.: Finally :.:.:.:.:')
print('[info] Task Finished at', time.strftime("%Y-%m-%d %H:%M:%S %A", time.localtime()))
print('============================\n')
| true
| true
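A hedged sketch that lifts the commented-out SMTP block of the record above into a runnable helper; host, credentials and addresses are the placeholder values from those comments, not working settings:

import smtplib
from email.mime.text import MIMEText
from email.header import Header

def send_mail(title, msg,
              mail_host="smtp.qq.com",          # SMTP server address
              mail_user="sender@example.com",   # SMTP sender account
              mail_pass="emailpassword",        # SMTP sender password
              receivers=("receiver@example.com",)):
    message = MIMEText(msg, 'plain', 'utf-8')
    message['From'] = Header(mail_user, 'utf-8')
    message['To'] = Header(",".join(receivers), 'utf-8')
    message['Subject'] = Header(title, 'utf-8')
    try:
        # SMTP over SSL on port 465 (Python 3.7+)
        with smtplib.SMTP_SSL(mail_host, 465) as smtp:
            smtp.login(mail_user, mail_pass)
            smtp.sendmail(mail_user, list(receivers), message.as_string())
        print("[info] Success: The email was sent successfully")
    except smtplib.SMTPException:
        print("[error] Error: Can not send mail")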
|
7907176c72d8480869915c1f61fc92cd1e229bf5
| 5,343
|
py
|
Python
|
main_test.py
|
WenZhihao666/TREND
|
ca4b17139b5f24d44d9421fed92021eb7a95ed6d
|
[
"MIT"
] | 2
|
2022-03-21T05:30:46.000Z
|
2022-03-21T05:35:37.000Z
|
main_test.py
|
WenZhihao666/TREND
|
ca4b17139b5f24d44d9421fed92021eb7a95ed6d
|
[
"MIT"
] | null | null | null |
main_test.py
|
WenZhihao666/TREND
|
ca4b17139b5f24d44d9421fed92021eb7a95ed6d
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../')
import torch
import numpy as np
import random
import math
import time
import argparse
from data_tlp_cite import DataHelper_t
from torch.utils.data import DataLoader
from model import Model
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
FType = torch.FloatTensor
LType = torch.LongTensor
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def main(args):
setup_seed(args.seed)
Data = DataHelper_t(args.file_path, args.node_feature_path, args.neg_size, args.hist_len, args.directed,
tlp_flag=args.tlp_flag)
loader = DataLoader(Data, batch_size=args.batch_size, shuffle=False, num_workers=5)
model = Model(args).to(device)
model.load_state_dict(torch.load('../res/cite/model.pkl'))
s_emb_list = []
t_emb_list = []
dup_s_emb_list = []
neg_embs_list = []
loss_list = []
model.eval()
for i_batch, sample_batched in enumerate(loader):
loss, s_emb, t_emb, dup_s_emb, neg_embs = model.forward(
sample_batched['s_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['event_time'].type(FType).to(device),
sample_batched['s_history_times'].type(FType).to(device),
sample_batched['s_his_his_times_list'].type(FType).to(device),
sample_batched['t_history_times'].type(FType).to(device),
sample_batched['t_his_his_times_list'].type(FType).to(device),
sample_batched['neg_his_times_list'].type(FType).to(device),
sample_batched['neg_his_his_times_list'].type(FType).to(device),
sample_batched['s_edge_rate'].type(FType).to(device),
training=False
)
s_emb_list.append(s_emb)
t_emb_list.append(t_emb)
dup_s_emb_list.append(dup_s_emb.reshape(-1, args.out_dim))
neg_embs_list.append(neg_embs.reshape(-1, args.out_dim))
loss_list.append(loss)
s_emb_list = torch.cat(s_emb_list, dim=0)
t_emb_list = torch.cat(t_emb_list, dim=0)
dup_s_emb_list = torch.cat(dup_s_emb_list, dim=0)
neg_embs_list = torch.cat(neg_embs_list, dim=0)
truth = torch.ones(s_emb_list.size(0), dtype=torch.int)
truth_neg = torch.zeros(neg_embs_list.size(0), dtype=torch.int)
s_list = torch.cat((s_emb_list, dup_s_emb_list), dim=0)
t_list = torch.cat((t_emb_list, neg_embs_list), dim=0)
truth_list = torch.cat((truth, truth_neg), dim=0)
dif_list = torch.abs(s_list - t_list)
x_train, x_test, y_train, y_test = train_test_split(dif_list, truth_list, test_size=1 - args.train_ratio,
random_state=args.seed, stratify=truth_list)
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train, y_train)
y_test_pred = lr.predict(x_test)
acc = accuracy_score(y_test, y_test_pred)
f1 = f1_score(y_test, y_test_pred)
print('acc:{}'.format(round(acc, 4)))
print('f1:{}'.format(round(f1, 4)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='./data/cite/emb_edges.pt')
parser.add_argument('--node_feature_path', type=str, default='./data/cite/sorted_emb_feat.pt')
parser.add_argument('--neg_size', type=int, default=1)
parser.add_argument('--hist_len', type=int, default=10)
parser.add_argument('--directed', type=bool, default=False)
parser.add_argument('--epoch_num', type=int, default=10, help='epoch number')
parser.add_argument('--tlp_flag', type=bool, default=True)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--hid_dim', type=int, default=16)
parser.add_argument('--feat_dim', type=int, default=128)
parser.add_argument('--out_dim', type=int, default=16)
parser.add_argument('--seed', type=int, default=4)
parser.add_argument('--ncoef', type=float, default=0.01)
parser.add_argument('--l2_reg', type=float, default=0.001)
parser.add_argument('--train_ratio', type=float, default=0.8)
args = parser.parse_args()
start = time.perf_counter()
main(args)
| 41.418605
| 109
| 0.688564
|
import sys
sys.path.append('../')
import torch
import numpy as np
import random
import math
import time
import argparse
from data_tlp_cite import DataHelper_t
from torch.utils.data import DataLoader
from model import Model
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
FType = torch.FloatTensor
LType = torch.LongTensor
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def main(args):
setup_seed(args.seed)
Data = DataHelper_t(args.file_path, args.node_feature_path, args.neg_size, args.hist_len, args.directed,
tlp_flag=args.tlp_flag)
loader = DataLoader(Data, batch_size=args.batch_size, shuffle=False, num_workers=5)
model = Model(args).to(device)
model.load_state_dict(torch.load('../res/cite/model.pkl'))
s_emb_list = []
t_emb_list = []
dup_s_emb_list = []
neg_embs_list = []
loss_list = []
model.eval()
for i_batch, sample_batched in enumerate(loader):
loss, s_emb, t_emb, dup_s_emb, neg_embs = model.forward(
sample_batched['s_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['event_time'].type(FType).to(device),
sample_batched['s_history_times'].type(FType).to(device),
sample_batched['s_his_his_times_list'].type(FType).to(device),
sample_batched['t_history_times'].type(FType).to(device),
sample_batched['t_his_his_times_list'].type(FType).to(device),
sample_batched['neg_his_times_list'].type(FType).to(device),
sample_batched['neg_his_his_times_list'].type(FType).to(device),
sample_batched['s_edge_rate'].type(FType).to(device),
training=False
)
s_emb_list.append(s_emb)
t_emb_list.append(t_emb)
dup_s_emb_list.append(dup_s_emb.reshape(-1, args.out_dim))
neg_embs_list.append(neg_embs.reshape(-1, args.out_dim))
loss_list.append(loss)
s_emb_list = torch.cat(s_emb_list, dim=0)
t_emb_list = torch.cat(t_emb_list, dim=0)
dup_s_emb_list = torch.cat(dup_s_emb_list, dim=0)
neg_embs_list = torch.cat(neg_embs_list, dim=0)
truth = torch.ones(s_emb_list.size(0), dtype=torch.int)
truth_neg = torch.zeros(neg_embs_list.size(0), dtype=torch.int)
s_list = torch.cat((s_emb_list, dup_s_emb_list), dim=0)
t_list = torch.cat((t_emb_list, neg_embs_list), dim=0)
truth_list = torch.cat((truth, truth_neg), dim=0)
dif_list = torch.abs(s_list - t_list)
x_train, x_test, y_train, y_test = train_test_split(dif_list, truth_list, test_size=1 - args.train_ratio,
random_state=args.seed, stratify=truth_list)
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train, y_train)
y_test_pred = lr.predict(x_test)
acc = accuracy_score(y_test, y_test_pred)
f1 = f1_score(y_test, y_test_pred)
print('acc:{}'.format(round(acc, 4)))
print('f1:{}'.format(round(f1, 4)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='./data/cite/emb_edges.pt')
parser.add_argument('--node_feature_path', type=str, default='./data/cite/sorted_emb_feat.pt')
parser.add_argument('--neg_size', type=int, default=1)
parser.add_argument('--hist_len', type=int, default=10)
parser.add_argument('--directed', type=bool, default=False)
parser.add_argument('--epoch_num', type=int, default=10, help='epoch number')
parser.add_argument('--tlp_flag', type=bool, default=True)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--hid_dim', type=int, default=16)
parser.add_argument('--feat_dim', type=int, default=128)
parser.add_argument('--out_dim', type=int, default=16)
parser.add_argument('--seed', type=int, default=4)
parser.add_argument('--ncoef', type=float, default=0.01)
parser.add_argument('--l2_reg', type=float, default=0.001)
parser.add_argument('--train_ratio', type=float, default=0.8)
args = parser.parse_args()
start = time.perf_counter()
main(args)
| true
| true
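One hedged addendum to the record above: main_test.py imports roc_auc_score but never calls it. A self-contained sketch of the same probe-then-score pattern with AUC added; the arrays are synthetic stand-ins for dif_list / truth_list, nothing here comes from the TREND data:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score

rng = np.random.default_rng(4)
X = rng.normal(size=(200, 16))  # stand-in for |s_emb - t_emb| difference features
y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)  # stand-in edge labels

clf = LogisticRegression(max_iter=10000).fit(X[:160], y[:160])
scores = clf.predict_proba(X[160:])[:, 1]  # predicted probability of a true edge
print('acc:', round(accuracy_score(y[160:], clf.predict(X[160:])), 4))
print('auc:', round(roc_auc_score(y[160:], scores), 4))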
|
7907177ab357f99cc18d74cefb8094ccd7cbf455
| 63,787
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/aio/operations/_network_interfaces_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
"""NetworkInterfacesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
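    # --------------------------------------------------------------------------
    # Hedged usage sketch (not part of the generated file): calling the get and
    # begin_delete operations above through the versioned async client. The
    # credential, subscription id, resource group and NIC name are placeholders.
    #
    #   import asyncio
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.network.v2018_07_01.aio import NetworkManagementClient
    #
    #   async def main():
    #       async with DefaultAzureCredential() as cred:
    #           async with NetworkManagementClient(cred, "<subscription-id>") as client:
    #               nic = await client.network_interfaces.get("<rg>", "<nic-name>")
    #               print(nic.name)
    #               poller = await client.network_interfaces.begin_delete("<rg>", "<nic-name>")
    #               await poller.result()
    #
    #   asyncio.run(main())
    # --------------------------------------------------------------------------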
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
async def _get_effective_route_table_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def begin_get_effective_route_table(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
async def _list_effective_network_security_groups_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
async def begin_list_effective_network_security_groups(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_07_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
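    # Usage sketch (not part of the generated file; names are hypothetical): assumes
    # an authenticated aio NetworkManagementClient exposing this operations group as
    # `client.network_interfaces`. begin_* methods must themselves be awaited and
    # return an AsyncLROPoller; awaiting .result() waits for the POST-based LRO.
    #
    #     poller = await client.network_interfaces.begin_list_effective_network_security_groups(
    #         resource_group_name="my-rg",
    #         network_interface_name="my-nic",
    #     )
    #     nsg_result = await poller.result()  # EffectiveNetworkSecurityGroupListResult
    #     token = poller.continuation_token()  # may be passed back later as continuation_token=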
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
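    # Usage sketch (hypothetical `client`): this method is not awaited itself; it
    # returns an AsyncItemPaged that fetches pages lazily, following next_link until
    # the service stops returning one, and is consumed with `async for`:
    #
    #     async for nic in client.network_interfaces.list_virtual_machine_scale_set_vm_network_interfaces(
    #             resource_group_name="my-rg",
    #             virtual_machine_scale_set_name="my-vmss",
    #             virtualmachine_index="0",
    #     ):
    #         print(nic.name)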
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
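    # Usage sketch (hypothetical `client`): same paging pattern as above, but the
    # operation spans every VM in the scale set, so no virtualmachine_index is passed.
    #
    #     async for nic in client.network_interfaces.list_virtual_machine_scale_set_network_interfaces(
    #             "my-rg", "my-vmss"):
    #         print(nic.name)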
async def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
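    # Usage sketch (hypothetical `client` and expand value): a plain awaitable GET.
    # `expand` maps to the $expand query parameter and is omitted from the request
    # when None.
    #
    #     nic = await client.network_interfaces.get_virtual_machine_scale_set_network_interface(
    #         "my-rg", "my-vmss", "0", "my-nic", expand="ipConfigurations")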
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
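    # Usage sketch (hypothetical `client`): pages through the IP configurations of
    # one NIC on one scale-set VM instance.
    #
    #     async for ip_cfg in client.network_interfaces.list_virtual_machine_scale_set_ip_configurations(
    #             "my-rg", "my-vmss", "0", "my-nic"):
    #         print(ip_cfg.name)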
async def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_07_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
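    # Usage sketch (hypothetical `client` and configuration name): fetches a single
    # IP configuration by name from the NIC addressed above.
    #
    #     ip_cfg = await client.network_interfaces.get_virtual_machine_scale_set_ip_configuration(
    #         "my-rg", "my-vmss", "0", "my-nic", "ipconfig1")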
avg_line_length: 52.284426 | max_line_length: 354 | alphanum_fraction: 0.680107
content_no_comment:
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def begin_delete(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def get(
self,
resource_group_name: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def _create_or_update_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def begin_create_or_update(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.NetworkInterface",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def _update_tags_initial(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._update_tags_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
async def begin_update_tags(
self,
resource_group_name: str,
network_interface_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.NetworkInterface"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'}
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'}
async def _get_effective_route_table_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveRouteListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
url = self._get_effective_route_table_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}
async def begin_get_effective_route_table(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveRouteListResult"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}
async def _list_effective_network_security_groups_initial(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
url = self._list_effective_network_security_groups_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}
async def begin_list_effective_network_security_groups(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncLROPoller["_models.EffectiveNetworkSecurityGroupListResult"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'}
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'}
async def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterface":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
url = self.get_virtual_machine_scale_set_network_interface.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'}
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'}
async def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
accept = "application/json"
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'}
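# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the generated SDK file):
# consuming the two operations above. It assumes they are exposed on the
# network_interfaces operations group of
# azure.mgmt.network.aio.NetworkManagementClient and that azure-identity is
# installed; the resource names are placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient
async def _print_vmss_ip_configurations(subscription_id: str) -> None:
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, subscription_id) as client:
            # AsyncItemPaged follows next_link transparently, exactly as
            # prepare_request/get_next implement above.
            pager = client.network_interfaces.list_virtual_machine_scale_set_ip_configurations(
                resource_group_name="my-rg",
                virtual_machine_scale_set_name="my-vmss",
                virtualmachine_index="0",
                network_interface_name="my-nic")
            async for ip_config in pager:
                print(ip_config.name)
            # A single configuration can also be fetched directly:
            one = await client.network_interfaces.get_virtual_machine_scale_set_ip_configuration(
                "my-rg", "my-vmss", "0", "my-nic", "my-ip-config")
            print(one.id)
# Run with: asyncio.run(_print_vmss_ip_configurations("<subscription-id>"))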
| true
| true
|
7907188b3a86de5b78cf320d8b128b1c60427cb3
| 10,628
|
py
|
Python
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 2
|
2019-06-28T17:43:04.000Z
|
2019-06-28T17:43:07.000Z
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 8
|
2019-07-08T10:09:18.000Z
|
2019-09-26T20:55:43.000Z
|
tensorflow/contrib/model_pruning/python/pruning_test.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 1
|
2020-07-27T13:51:52.000Z
|
2020-07-27T13:51:52.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.VariableV1(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
self.assertEqual(p._weight_sparsity_map["conv1"], 0.8)
self.assertEqual(p._weight_sparsity_map["conv2/kernel"], 0.8)
def testInitWithExternalSparsity(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.cached_session():
weights = variables.VariableV1(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.cached_session() as session:
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.95, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 5)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.VariableV1(0.0, name="threshold")
sparsity = variables.VariableV1(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.cached_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.cached_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.VariableV1(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.cached_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
    # Weights are pruned at steps 0, 2, 4, and 6.
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
def testWeightSpecificSparsity(self):
param_list = [
"begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
"target_sparsity=0.5",
"weight_sparsity_map=[layer1:0.6,layer2/weights:0.75,.*kernel:0.6]",
"threshold_decay=0.0"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
with variable_scope.variable_scope("layer1"):
w1 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w1)
with variable_scope.variable_scope("layer2"):
w2 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w2)
with variable_scope.variable_scope("layer3"):
w3 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="kernel")
_ = pruning.apply_mask(w3)
p = pruning.Pruning(pruning_hparams)
mask_update_op = p.conditional_mask_update_op()
increment_global_step = state_ops.assign_add(self.global_step, 1)
with self.cached_session() as session:
variables.global_variables_initializer().run()
for _ in range(110):
session.run(mask_update_op)
session.run(increment_global_step)
self.assertAllClose(
session.run(pruning.get_weight_sparsity()), [0.6, 0.75, 0.6])
if __name__ == "__main__":
test.main()
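# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original test file):
# wiring the API exercised above into a training graph. Only calls
# demonstrated by the tests are used; the layer shape, hparam values and
# variable names are illustrative.
def _pruning_wiring_sketch():
  hparams = pruning.get_pruning_hparams().parse(
      "begin_pruning_step=1,end_pruning_step=100,"
      "pruning_frequency=10,target_sparsity=0.5")
  global_step = training_util.get_or_create_global_step()
  weights = variables.VariableV1(
      random_ops.random_normal([10, 10]), name="sketch_weights")
  masked = pruning.apply_mask(weights)  # use `masked` in the forward pass
  p = pruning.Pruning(hparams)
  mask_update_op = p.conditional_mask_update_op()  # run once per train step
  increment_step = state_ops.assign_add(global_step, 1)
  return masked, mask_update_op, increment_step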
| 41.84252
| 80
| 0.684136
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"sparsity_function_end_step=100", "target_sparsity=0.9",
"weight_sparsity_map=[conv1:0.8,conv2/kernel:0.8]"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
self.sparsity = variables.VariableV1(0.5, name="sparsity")
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
self.assertEqual(p._weight_sparsity_map["conv1"], 0.8)
self.assertEqual(p._weight_sparsity_map["conv2/kernel"], 0.8)
def testInitWithExternalSparsity(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.cached_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.cached_session():
weights = variables.VariableV1(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.cached_session() as session:
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.95, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 5)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.VariableV1(0.0, name="threshold")
sparsity = variables.VariableV1(0.5, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.cached_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
[1., 1., 1., 1.], [1., 1., 1., 1.]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.cached_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.VariableV1(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 50)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6",
"nbins=100"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.VariableV1(0.00, name="sparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.cached_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
def testWeightSpecificSparsity(self):
param_list = [
"begin_pruning_step=1", "pruning_frequency=1", "end_pruning_step=100",
"target_sparsity=0.5",
"weight_sparsity_map=[layer1:0.6,layer2/weights:0.75,.*kernel:0.6]",
"threshold_decay=0.0"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
with variable_scope.variable_scope("layer1"):
w1 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w1)
with variable_scope.variable_scope("layer2"):
w2 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="weights")
_ = pruning.apply_mask(w2)
with variable_scope.variable_scope("layer3"):
w3 = variables.VariableV1(
math_ops.linspace(1.0, 100.0, 100), name="kernel")
_ = pruning.apply_mask(w3)
p = pruning.Pruning(pruning_hparams)
mask_update_op = p.conditional_mask_update_op()
increment_global_step = state_ops.assign_add(self.global_step, 1)
with self.cached_session() as session:
variables.global_variables_initializer().run()
for _ in range(110):
session.run(mask_update_op)
session.run(increment_global_step)
self.assertAllClose(
session.run(pruning.get_weight_sparsity()), [0.6, 0.75, 0.6])
if __name__ == "__main__":
test.main()
| true
| true
|
790718cc5c6d7c13093c48cc4b586fa7fd6fc109
| 2,125
|
py
|
Python
|
spider/utilities/util_urlfilter.py
|
charlesXu86/PSpider
|
98277905508d706d9b0ea2ac2854b41ae0b06fe3
|
[
"BSD-2-Clause"
] | 1
|
2022-02-21T03:30:52.000Z
|
2022-02-21T03:30:52.000Z
|
spider/utilities/util_urlfilter.py
|
charlesXu86/PSpider
|
98277905508d706d9b0ea2ac2854b41ae0b06fe3
|
[
"BSD-2-Clause"
] | null | null | null |
spider/utilities/util_urlfilter.py
|
charlesXu86/PSpider
|
98277905508d706d9b0ea2ac2854b41ae0b06fe3
|
[
"BSD-2-Clause"
] | null | null | null |
# _*_ coding: utf-8 _*_
"""
util_urlfilter.py by xianhu
"""
import re
import pybloom_live
from .util_config import CONFIG_URLPATTERN_ALL
class UrlFilter(object):
"""
class of UrlFilter, to filter url by regexs and (bloomfilter or set)
"""
def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=(r"^http",), capacity=None):
"""
constructor, use variable of BloomFilter if capacity else variable of set
"""
self._re_black_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in black_patterns] if black_patterns else []
self._re_white_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in white_patterns] if white_patterns else []
self._url_set = set() if not capacity else None
self._bloom_filter = pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001) if capacity else None
return
def update(self, url_list):
"""
update this urlfilter using url_list
"""
if self._url_set is not None:
self._url_set.update(url_list)
else:
for url in url_list:
self._bloom_filter.add(url)
return
def check(self, url):
"""
check the url based on self._re_black_list and self._re_white_list
"""
# if url in black_list, return False
for re_black in self._re_black_list:
if re_black.search(url):
return False
# if url in white_list, return True
for re_white in self._re_white_list:
if re_white.search(url):
return True
return False if self._re_white_list else True
def check_and_add(self, url):
"""
check the url to make sure that the url hasn't been fetched, and add url to urlfilter
"""
result = False
if self.check(url):
if self._url_set is not None:
result = url not in self._url_set
self._url_set.add(url)
else:
result = not self._bloom_filter.add(url)
return result
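# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original module):
# the two storage modes of UrlFilter. The URLs are placeholders, and
# black_patterns is passed explicitly so the demo does not depend on the
# default CONFIG_URLPATTERN_ALL pattern.
def _url_filter_demo():
    # capacity=None keeps seen URLs in an exact set() -- fine for small crawls
    uf = UrlFilter(black_patterns=(), white_patterns=(r"^https?://",))
    assert uf.check_and_add("https://example.com/a") is True   # first sighting
    assert uf.check_and_add("https://example.com/a") is False  # already seen
    assert uf.check("ftp://example.com/a") is False            # not whitelisted
    # a capacity switches storage to a scalable Bloom filter, which may
    # (rarely) report a fresh URL as already seen, but never the reverse
    uf_big = UrlFilter(black_patterns=(), white_patterns=(r"^https?://",), capacity=100000)
    assert uf_big.check_and_add("https://example.com/b") is True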
| 31.716418
| 128
| 0.618824
|
import re
import pybloom_live
from .util_config import CONFIG_URLPATTERN_ALL
class UrlFilter(object):
def __init__(self, black_patterns=(CONFIG_URLPATTERN_ALL,), white_patterns=(r"^http",), capacity=None):
self._re_black_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in black_patterns] if black_patterns else []
self._re_white_list = [re.compile(pattern, flags=re.IGNORECASE) for pattern in white_patterns] if white_patterns else []
self._url_set = set() if not capacity else None
self._bloom_filter = pybloom_live.ScalableBloomFilter(capacity, error_rate=0.001) if capacity else None
return
def update(self, url_list):
if self._url_set is not None:
self._url_set.update(url_list)
else:
for url in url_list:
self._bloom_filter.add(url)
return
def check(self, url):
for re_black in self._re_black_list:
if re_black.search(url):
return False
for re_white in self._re_white_list:
if re_white.search(url):
return True
return False if self._re_white_list else True
def check_and_add(self, url):
result = False
if self.check(url):
if self._url_set is not None:
result = url not in self._url_set
self._url_set.add(url)
else:
result = not self._bloom_filter.add(url)
return result
| true
| true
|
79071902bf06f1aa91df391214ad289f5b2fa66a
| 41,449
|
py
|
Python
|
_py2tmp/testing/utils.py
|
DalavanCloud/tmppy
|
cdde676ba9d5011b7d2a46a9852e5986b90edbbc
|
[
"Apache-2.0"
] | 1
|
2018-09-01T18:14:26.000Z
|
2018-09-01T18:14:26.000Z
|
_py2tmp/testing/utils.py
|
DalavanCloud/tmppy
|
cdde676ba9d5011b7d2a46a9852e5986b90edbbc
|
[
"Apache-2.0"
] | null | null | null |
_py2tmp/testing/utils.py
|
DalavanCloud/tmppy
|
cdde676ba9d5011b7d2a46a9852e5986b90edbbc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
import textwrap
import re
import sys
import itertools
import subprocess
from functools import wraps
import difflib
import pytest
import py2tmp_test_config as config
import typed_ast.ast3 as ast
from _py2tmp import (
ast_to_ir3,
ir3_to_ir2,
ir2_to_ir1,
ir1_to_ir0,
optimize_ir3,
optimize_ir0,
ir0_to_cpp,
ir0,
utils,
)
def pretty_print_command(command):
return ' '.join('"' + x + '"' for x in command)
def add_line_numbers(source_code):
lines = source_code.splitlines()
last_line_num_length = len(str(len(lines)))
return '\n'.join('%%%sd: %%s' % last_line_num_length % (n + 1, line) for n, line in enumerate(lines))
class CommandFailedException(Exception):
def __init__(self, command, stdout, stderr, error_code):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Exit code {error_code}
Stdout:
{stdout}
Stderr:
{stderr}
''').format(command=pretty_print_command(self.command), error_code=self.error_code, stdout=self.stdout, stderr=self.stderr)
def run_command(executable, args=[]):
command = [executable] + args
print('Executing command:', pretty_print_command(command))
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
except Exception as e:
raise Exception("While executing: %s" % command)
if p.returncode != 0:
raise CommandFailedException(command, stdout, stderr, p.returncode)
print('Execution successful.')
print('stdout:')
print(stdout)
print('')
print('stderr:')
print(stderr)
print('')
return (stdout, stderr)
def run_compiled_executable(executable):
run_command(executable)
class CompilationFailedException(Exception):
def __init__(self, command, error_message):
self.command = command
self.error_message = error_message
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Error message:
{error_message}
''').format(command=pretty_print_command(self.command), error_message=self.error_message)
class PosixCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['-c', source, '-o', os.path.devnull]
self._compile(include_dirs, args=args)
except CommandFailedException as e:
raise CompilationFailedException(e.command, e.stderr)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['-o', output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['-W', '-Wall', '-g0', '-Werror', '-std=c++11']
+ include_flags
+ args
)
run_command(self.executable, args)
class MsvcCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['/c', source]
self._compile(include_dirs, args = args)
except CommandFailedException as e:
# Note that we use stdout here, unlike above. MSVC reports compilation warnings and errors on stdout.
raise CompilationFailedException(e.command, e.stdout)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['/Fe' + output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['/nologo', '/FS', '/W4', '/D_SCL_SECURE_NO_WARNINGS', '/WX']
+ include_flags
+ args
)
run_command(self.executable, args)
if config.CXX_COMPILER_NAME == 'MSVC':
compiler = MsvcCompiler()
py2tmp_error_message_extraction_regex = 'error C2338: (.*)'
else:
compiler = PosixCompiler()
py2tmp_error_message_extraction_regex = 'static.assert(.*)'
_assert_helper = unittest.TestCase()
def _create_temporary_file(file_content, file_name_suffix=''):
file_descriptor, file_name = tempfile.mkstemp(text=True, suffix=file_name_suffix)
file = os.fdopen(file_descriptor, mode='w')
file.write(file_content)
file.close()
return file_name
def _cap_to_lines(s, n):
lines = s.splitlines()
if len(lines) <= n:
return s
else:
return '\n'.join(lines[0:n] + ['...'])
def try_remove_temporary_file(filename):
try:
os.remove(filename)
except:
# When running tests on Windows using Appveyor, the remove command fails for temporary files sometimes.
# This shouldn't cause the tests to fail, so we ignore the exception and go ahead.
pass
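# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): exercising whichever compiler
# wrapper was selected above. The one-line translation unit is illustrative;
# include_dirs reuses config.MPYL_INCLUDE_DIR as the rest of this module does.
def _compiler_smoke_check():
    source_file_name = _create_temporary_file('int main() { return 0; }\n',
                                              file_name_suffix='.cpp')
    try:
        # Raises CompilationFailedException (carrying the compiler output)
        # if the translation unit fails to compile.
        compiler.compile_discarding_output(
            source=source_file_name,
            include_dirs=[config.MPYL_INCLUDE_DIR])
    finally:
        try_remove_temporary_file(source_file_name)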
def expect_cpp_code_compile_error_helper(check_error_fun, tmppy_source, module_ir2, module_ir1, cxx_source):
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
try:
compiler.compile_discarding_output(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
args=[])
pytest.fail(textwrap.dedent('''\
The test should have failed to compile, but it compiled successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cxx_source = add_line_numbers(cxx_source)),
pytrace=False)
except CompilationFailedException as e1:
e = e1
error_message = e.error_message
error_message_lines = error_message.splitlines()
# Different compilers output a different number of spaces when pretty-printing types.
# When using libc++, sometimes std::foo identifiers are reported as std::__1::foo.
normalized_error_message = error_message.replace(' ', '').replace('std::__1::', 'std::')
normalized_error_message_lines = normalized_error_message.splitlines()
error_message_head = _cap_to_lines(error_message, 40)
check_error_fun(e, error_message_lines, error_message_head, normalized_error_message_lines)
try_remove_temporary_file(source_file_name)
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source):
"""
Tests that the given source produces the expected error during compilation.
:param expected_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
    :param cxx_source: The C++ source code. This will be dedented.
"""
expected_error_regex = expected_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line in normalized_error_message_lines:
if re.search(expected_error_regex, line):
return
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain that.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(expected_error = expected_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cxx_source):
"""
Tests that the given source produces the expected error during compilation.
:param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
:param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,
e.g. 'No explicit binding was found for C, and C is an abstract class'.
    :param cxx_source: The C++ source code. This will be dedented.
"""
if '\n' in expected_py2tmp_error_regex:
raise Exception('expected_py2tmp_error_regex should not contain newlines')
if '\n' in expected_py2tmp_error_desc_regex:
raise Exception('expected_py2tmp_error_desc_regex should not contain newlines')
expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line_number, line in enumerate(normalized_error_message_lines):
match = re.search('tmppy::impl::(.*Error<.*>)', line)
if match:
actual_py2tmp_error_line_number = line_number
actual_py2tmp_error = match.groups()[0]
if config.CXX_COMPILER_NAME == 'MSVC':
# MSVC errors are of the form:
#
# C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'tmppy::impl::MyError<X, Y>' being compiled
# with
# [
# X=int,
# Y=double
# ]
#
# So we need to parse the following few lines and use them to replace the placeholder types in the tmppy error type.
try:
replacement_lines = []
if normalized_error_message_lines[line_number + 1].strip() == 'with':
for line in itertools.islice(normalized_error_message_lines, line_number + 3, None):
line = line.strip()
if line == ']':
break
if line.endswith(','):
line = line[:-1]
replacement_lines.append(line)
for replacement_line in replacement_lines:
match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line)
if not match:
raise Exception('Failed to parse replacement line: %s' % replacement_line) from e
(type_variable, type_expression) = match.groups()
actual_py2tmp_error = re.sub(r'\b' + type_variable + r'\b', type_expression, actual_py2tmp_error)
except Exception:
raise Exception('Failed to parse MSVC template type arguments')
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command = e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
for line_number, line in enumerate(error_message_lines):
match = re.search(py2tmp_error_message_extraction_regex, line)
if match:
actual_static_assert_error_line_number = line_number
actual_static_assert_error = match.groups()[0]
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain static_assert errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error type.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_desc_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error message.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
# 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least).
# It might need to be changed.
if actual_py2tmp_error_line_number > 6 or actual_static_assert_error_line_number > 6:
pytest.fail(
textwrap.dedent('''\
The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.
The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).
The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(actual_py2tmp_error_line_number = actual_py2tmp_error_line_number,
actual_static_assert_error_line_number = actual_static_assert_error_line_number,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]:
if re.search('tmppy::impl', line):
pytest.fail(
                'The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head,
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source):
"""
Tests that the given source compiles and runs successfully.
:param source_code: The C++ source code. This will be dedented.
"""
if 'main(' not in cxx_source:
cxx_source += textwrap.dedent('''
int main() {
}
''')
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
output_file_name = _create_temporary_file('', executable_suffix)
e = None
try:
compiler.compile_and_link(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
output_file_name=output_file_name,
args=[])
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ source did not compile.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
try:
run_compiled_executable(output_file_name)
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ executable did not run successfully.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
C++ source:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
# Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
try_remove_temporary_file(source_file_name)
try_remove_temporary_file(output_file_name)
def _get_function_body(f):
source_code, _ = inspect.getsourcelines(f)
# Skip the annotation and the line where the function is defined.
expected_line = 'def %s():\n' % f.__name__
while source_code[0] != expected_line:
source_code = source_code[1:]
source_code = source_code[1:]
# The body of some tests is a multiline string because they would otherwise cause the pytest test file to fail
# parsing.
if source_code[0].strip() == '\'\'\'' and source_code[-1].strip() == '\'\'\'':
source_code = source_code[1:-1]
return textwrap.dedent(''.join(source_code))
def create_identifier_generator():
def identifier_generator_fun():
for i in itertools.count():
yield 'TmppyInternal_%s' % i
return iter(identifier_generator_fun())
def _convert_tmppy_source_to_ir(python_source, identifier_generator):
filename='<unknown>'
source_ast = ast.parse(python_source, filename)
module_ir3 = ast_to_ir3.module_ast_to_ir3(source_ast, filename, python_source.splitlines())
module_ir3 = optimize_ir3.optimize_module(module_ir3)
module_ir2 = ir3_to_ir2.module_to_ir2(module_ir3, identifier_generator)
module_ir1 = ir2_to_ir1.module_to_ir1(module_ir2)
return module_ir2, module_ir1
def _convert_to_cpp_expecting_success(tmppy_source):
identifier_generator = create_identifier_generator()
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir(tmppy_source, identifier_generator)
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
error_message = e.args[0]),
pytrace=False)
try:
header = ir1_to_ir0.module_to_ir0(module_ir1, identifier_generator)
header = optimize_ir0.optimize_header(header, identifier_generator, verbose=False)
cpp_source = ir0_to_cpp.header_to_cpp(header, identifier_generator)
cpp_source = utils.clang_format(cpp_source)
return module_ir2, module_ir1, cpp_source
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
error_message=e.args[0]),
pytrace=False)
def assert_compilation_succeeds(extra_cpp_prelude=''):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, extra_cpp_prelude + cpp_source)
return wrapper
return eval
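# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): a test written against the
# decorator above. The function body is TMPPy source, extracted via
# _get_function_body(); the assertion is illustrative and the example is kept
# commented out so this utilities module defines no tests of its own.
#
# @assert_compilation_succeeds()
# def test_trivial_assertion():
#     assert True == True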
def assert_code_optimizes_to(expected_cpp_source: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
assert expected_cpp_source[0] == '\n'
if cpp_source != expected_cpp_source[1:]:
pytest.fail(
textwrap.dedent('''\
The generated code didn't match the expected code.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
Generated C++ source:
{cpp_source}
Expected C++ source:
{expected_cpp_source}
Diff:
{cpp_source_diff}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=str(cpp_source),
expected_cpp_source=str(expected_cpp_source[1:]),
cpp_source_diff=''.join(difflib.unified_diff(expected_cpp_source[1:].splitlines(True),
cpp_source.splitlines(True),
fromfile='expected.h',
tofile='actual.h'))),
pytrace=False)
return wrapper
return eval
def assert_compilation_fails(expected_py2tmp_error_regex: str, expected_py2tmp_error_desc_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_generic_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_static_assert_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
r'(error: static assertion failed: |error: static_assert failed .)' + expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
def _split_list(l, num_elems_in_chunk):
args = [iter(l)] * num_elems_in_chunk
return list(itertools.zip_longest(*args))
def _get_line_from_diagnostic(diagnostic):
matches = re.match('<unknown>:([0-9]*):', diagnostic)
return int(matches.group(1))
def assert_conversion_fails(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
actual_source_lines = []
expected_error_regex = None
expected_error_line = None
expected_note_by_line = dict()
for line_index, line in enumerate(tmppy_source.splitlines()):
error_regex_marker = ' # error: '
note_regex_marker = ' # note: '
if error_regex_marker in line:
if expected_error_regex:
pytest.fail('Multiple expected errors in the same test are not supported', pytrace=False)
[line, expected_error_regex] = line.split(error_regex_marker)
expected_error_line = line_index + 1
elif note_regex_marker in line:
[line, expected_note_regex] = line.split(note_regex_marker)
expected_note_by_line[line_index + 1] = expected_note_regex
actual_source_lines.append(line)
if not expected_error_regex:
pytest.fail(
textwrap.dedent('''\
assert_conversion_fails was used, but no expected error regex was found.
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir('\n'.join(actual_source_lines), create_identifier_generator())
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected an exception, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1)),
pytrace=False)
# py2tmp diagnostics take up 3 lines each, e.g.:
# <unknown>:2:11: error: Empty lists are not currently supported.
# return []
# ^
py2tmp_diagnostics = _split_list(e.args[0].splitlines(), num_elems_in_chunk=3)
error_diagnostic = py2tmp_diagnostics[0]
expected_error_regex = '<unknown>:[0-9]*:[0-9]*: error: ' + expected_error_regex
if not re.match(expected_error_regex, error_diagnostic[0]):
pytest.fail(
textwrap.dedent('''\
An exception was thrown, but it didn\'t match the expected error regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
matches = re.match('<unknown>:([0-9]*):', error_diagnostic[0])
actual_error_line = int(matches.group(1))
if expected_error_line != actual_error_line:
pytest.fail(
textwrap.dedent('''\
An exception matching the expected regex was thrown, but the error mentioned the wrong line: {actual_error_line} was reported instead of {expected_error_line}
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(actual_error_line=actual_error_line,
expected_error_line=expected_error_line,
expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
actual_note_by_line = {_get_line_from_diagnostic(note[0]): note
for note in py2tmp_diagnostics[1:]}
for expected_note_line, expected_note_regex in expected_note_by_line.items():
actual_note = actual_note_by_line.get(expected_note_line)
if not actual_note:
raise Exception('Expected the note %s on line %s but no note was emitted mentioning this line. Emitted notes: %s' % (
expected_note_regex, expected_note_line, json.dumps(actual_note_by_line, indent=4)))
expected_note_regex = '<unknown>:[0-9]*:[0-9]*: note: ' + expected_note_regex
if not re.match(expected_note_regex, actual_note[0]):
pytest.fail(
textwrap.dedent('''\
A note diagnostic was emitted, but it didn\'t match the expected note regex.
Expected note regex: {expected_note_regex}
Actual note:
{actual_note}
TMPPy source:
{tmppy_source}
''').format(expected_note_regex = expected_note_regex,
actual_note = '\n'.join(actual_note),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
for actual_note_line, actual_note in actual_note_by_line.items():
expected_note = expected_note_by_line.get(actual_note_line)
if not expected_note:
pytest.fail(
textwrap.dedent('''\
Unexpected note:
{actual_note}
TMPPy source:
{tmppy_source}
                    ''').format(actual_note = '\n'.join(actual_note),
                                tmppy_source = add_line_numbers(tmppy_source)),
                    pytrace=False)
return wrapper
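# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): the inline ' # error: ' marker
# convention parsed by assert_conversion_fails above. The body is TMPPy
# source, and the expected message mirrors the diagnostic example quoted in
# the comment inside that function; kept commented out for the same reason as
# the sketch above.
#
# @assert_conversion_fails
# def test_empty_list_rejected():
#     def f(b: bool):
#         return []  # error: Empty lists are not currently supported.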
def assert_conversion_fails_with_codegen_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
try:
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
e = None
except ir0.CodegenError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected a codegen error, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
                    {tmppy_ir1}
C++ source:
{cpp_source}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
if not re.match(expected_error_regex, e.args[0]):
pytest.fail(
textwrap.dedent('''\
A codegen error was emitted as expected, but it didn\'t match the expected note regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cpp_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = e.args[0],
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
return wrapper
return eval
# Note: this is not the main function of this file; it's meant to be used as the main function from test_*.py files.
def main(file):
code = pytest.main(args = sys.argv + [os.path.realpath(file)])
exit(code)
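# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): invoking main() from a test_*.py
# file, per the note above.
#
# from _py2tmp.testing.utils import main
#
# if __name__ == '__main__':
#     main(__file__)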
| 41.699195
| 202
| 0.547806
|
import inspect
import json
import os
import tempfile
import unittest
import textwrap
import re
import sys
import itertools
import subprocess
from functools import wraps
import difflib
import pytest
import py2tmp_test_config as config
import typed_ast.ast3 as ast
from _py2tmp import (
ast_to_ir3,
ir3_to_ir2,
ir2_to_ir1,
ir1_to_ir0,
optimize_ir3,
optimize_ir0,
ir0_to_cpp,
ir0,
utils,
)
def pretty_print_command(command):
return ' '.join('"' + x + '"' for x in command)
def add_line_numbers(source_code):
lines = source_code.splitlines()
last_line_num_length = len(str(len(lines)))
return '\n'.join('%%%sd: %%s' % last_line_num_length % (n + 1, line) for n, line in enumerate(lines))
class CommandFailedException(Exception):
def __init__(self, command, stdout, stderr, error_code):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Exit code {error_code}
Stdout:
{stdout}
Stderr:
{stderr}
''').format(command=pretty_print_command(self.command), error_code=self.error_code, stdout=self.stdout, stderr=self.stderr)
def run_command(executable, args=[]):
command = [executable] + args
print('Executing command:', pretty_print_command(command))
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
except Exception as e:
raise Exception("While executing: %s" % command)
if p.returncode != 0:
raise CommandFailedException(command, stdout, stderr, p.returncode)
print('Execution successful.')
print('stdout:')
print(stdout)
print('')
print('stderr:')
print(stderr)
print('')
return (stdout, stderr)
def run_compiled_executable(executable):
run_command(executable)
class CompilationFailedException(Exception):
def __init__(self, command, error_message):
self.command = command
self.error_message = error_message
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Error message:
{error_message}
''').format(command=pretty_print_command(self.command), error_message=self.error_message)
class PosixCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['-c', source, '-o', os.path.devnull]
self._compile(include_dirs, args=args)
except CommandFailedException as e:
raise CompilationFailedException(e.command, e.stderr)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['-o', output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['-W', '-Wall', '-g0', '-Werror', '-std=c++11']
+ include_flags
+ args
)
run_command(self.executable, args)
class MsvcCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['/c', source]
self._compile(include_dirs, args = args)
except CommandFailedException as e:
raise CompilationFailedException(e.command, e.stdout)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['/Fe' + output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['/nologo', '/FS', '/W4', '/D_SCL_SECURE_NO_WARNINGS', '/WX']
+ include_flags
+ args
)
run_command(self.executable, args)
if config.CXX_COMPILER_NAME == 'MSVC':
compiler = MsvcCompiler()
py2tmp_error_message_extraction_regex = 'error C2338: (.*)'
else:
compiler = PosixCompiler()
py2tmp_error_message_extraction_regex = 'static.assert(.*)'
_assert_helper = unittest.TestCase()
def _create_temporary_file(file_content, file_name_suffix=''):
file_descriptor, file_name = tempfile.mkstemp(text=True, suffix=file_name_suffix)
file = os.fdopen(file_descriptor, mode='w')
file.write(file_content)
file.close()
return file_name
def _cap_to_lines(s, n):
lines = s.splitlines()
if len(lines) <= n:
return s
else:
return '\n'.join(lines[0:n] + ['...'])
def try_remove_temporary_file(filename):
try:
os.remove(filename)
except:
pass
def expect_cpp_code_compile_error_helper(check_error_fun, tmppy_source, module_ir2, module_ir1, cxx_source):
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
try:
compiler.compile_discarding_output(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
args=[])
pytest.fail(textwrap.dedent('''\
The test should have failed to compile, but it compiled successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cxx_source = add_line_numbers(cxx_source)),
pytrace=False)
except CompilationFailedException as e1:
e = e1
error_message = e.error_message
error_message_lines = error_message.splitlines()
# Different compilers output a different number of spaces when pretty-printing types.
# When using libc++, sometimes std::foo identifiers are reported as std::__1::foo.
normalized_error_message = error_message.replace(' ', '').replace('std::__1::', 'std::')
normalized_error_message_lines = normalized_error_message.splitlines()
error_message_head = _cap_to_lines(error_message, 40)
check_error_fun(e, error_message_lines, error_message_head, normalized_error_message_lines)
try_remove_temporary_file(source_file_name)
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source):
expected_error_regex = expected_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line in normalized_error_message_lines:
if re.search(expected_error_regex, line):
return
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain that.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(expected_error = expected_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cxx_source):
if '\n' in expected_py2tmp_error_regex:
raise Exception('expected_py2tmp_error_regex should not contain newlines')
if '\n' in expected_py2tmp_error_desc_regex:
raise Exception('expected_py2tmp_error_desc_regex should not contain newlines')
expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line_number, line in enumerate(normalized_error_message_lines):
match = re.search('tmppy::impl::(.*Error<.*>)', line)
if match:
actual_py2tmp_error_line_number = line_number
actual_py2tmp_error = match.groups()[0]
if config.CXX_COMPILER_NAME == 'MSVC':
# MSVC errors are of the form:
#
# C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'tmppy::impl::MyError<X, Y>' being compiled
# with
# [
# X=int,
# Y=double
# ]
#
# So we need to parse the following few lines and use them to replace the placeholder types in the tmppy error type.
try:
replacement_lines = []
if normalized_error_message_lines[line_number + 1].strip() == 'with':
for line in itertools.islice(normalized_error_message_lines, line_number + 3, None):
line = line.strip()
if line == ']':
break
if line.endswith(','):
line = line[:-1]
replacement_lines.append(line)
for replacement_line in replacement_lines:
match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line)
if not match:
raise Exception('Failed to parse replacement line: %s' % replacement_line) from e
(type_variable, type_expression) = match.groups()
actual_py2tmp_error = re.sub(r'\b' + type_variable + r'\b', type_expression, actual_py2tmp_error)
except Exception:
raise Exception('Failed to parse MSVC template type arguments')
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command = e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
for line_number, line in enumerate(error_message_lines):
match = re.search(py2tmp_error_message_extraction_regex, line)
if match:
actual_static_assert_error_line_number = line_number
actual_static_assert_error = match.groups()[0]
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain static_assert errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error type.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_desc_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error message.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
# 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least).
# It might need to be changed.
if actual_py2tmp_error_line_number > 6 or actual_static_assert_error_line_number > 6:
pytest.fail(
textwrap.dedent('''\
The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.
The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).
The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(actual_py2tmp_error_line_number = actual_py2tmp_error_line_number,
actual_static_assert_error_line_number = actual_static_assert_error_line_number,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]:
if re.search('tmppy::impl', line):
pytest.fail(
                    'The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head,
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source):
if 'main(' not in cxx_source:
cxx_source += textwrap.dedent('''
int main() {
}
''')
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
output_file_name = _create_temporary_file('', executable_suffix)
e = None
try:
compiler.compile_and_link(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
output_file_name=output_file_name,
args=[])
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ source did not compile.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
try:
run_compiled_executable(output_file_name)
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ executable did not run successfully.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
C++ source:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
# Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
try_remove_temporary_file(source_file_name)
try_remove_temporary_file(output_file_name)
def _get_function_body(f):
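    # Extract the (dedented) body of f as source text: drop everything up to and
    # including the 'def f():' line, and strip a surrounding triple-quoted block
    # if the body is wrapped in one.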
source_code, _ = inspect.getsourcelines(f)
expected_line = 'def %s():\n' % f.__name__
while source_code[0] != expected_line:
source_code = source_code[1:]
source_code = source_code[1:]
if source_code[0].strip() == '\'\'\'' and source_code[-1].strip() == '\'\'\'':
source_code = source_code[1:-1]
return textwrap.dedent(''.join(source_code))
def create_identifier_generator():
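    # Returns an infinite iterator of fresh identifiers:
    # TmppyInternal_0, TmppyInternal_1, ...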
def identifier_generator_fun():
for i in itertools.count():
yield 'TmppyInternal_%s' % i
return iter(identifier_generator_fun())
def _convert_tmppy_source_to_ir(python_source, identifier_generator):
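    # Front half of the compilation pipeline: parse the TMPPy (Python) source into
    # an AST, lower it to IR3, optimize, then lower to IR2 and IR1.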
filename='<unknown>'
source_ast = ast.parse(python_source, filename)
module_ir3 = ast_to_ir3.module_ast_to_ir3(source_ast, filename, python_source.splitlines())
module_ir3 = optimize_ir3.optimize_module(module_ir3)
module_ir2 = ir3_to_ir2.module_to_ir2(module_ir3, identifier_generator)
module_ir1 = ir2_to_ir1.module_to_ir1(module_ir2)
return module_ir2, module_ir1
def _convert_to_cpp_expecting_success(tmppy_source):
identifier_generator = create_identifier_generator()
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir(tmppy_source, identifier_generator)
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
error_message = e.args[0]),
pytrace=False)
try:
header = ir1_to_ir0.module_to_ir0(module_ir1, identifier_generator)
header = optimize_ir0.optimize_header(header, identifier_generator, verbose=False)
cpp_source = ir0_to_cpp.header_to_cpp(header, identifier_generator)
cpp_source = utils.clang_format(cpp_source)
return module_ir2, module_ir1, cpp_source
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
error_message=e.args[0]),
pytrace=False)
def assert_compilation_succeeds(extra_cpp_prelude=''):
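    # Decorator factory: the decorated function's body is TMPPy source that must
    # convert to C++ and then compile, link and run successfully.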
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, extra_cpp_prelude + cpp_source)
return wrapper
return eval
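# Hypothetical usage sketch (not a test from this suite), illustrating how the
# decorators above treat the decorated function's body as TMPPy source:
#
#   @assert_compilation_succeeds()
#   def test_trivial():
#       assert True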
def assert_code_optimizes_to(expected_cpp_source: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
assert expected_cpp_source[0] == '\n'
if cpp_source != expected_cpp_source[1:]:
pytest.fail(
textwrap.dedent('''\
The generated code didn't match the expected code.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
Generated C++ source:
{cpp_source}
Expected C++ source:
{expected_cpp_source}
Diff:
{cpp_source_diff}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=str(cpp_source),
expected_cpp_source=str(expected_cpp_source[1:]),
cpp_source_diff=''.join(difflib.unified_diff(expected_cpp_source[1:].splitlines(True),
cpp_source.splitlines(True),
fromfile='expected.h',
tofile='actual.h'))),
pytrace=False)
return wrapper
return eval
def assert_compilation_fails(expected_py2tmp_error_regex: str, expected_py2tmp_error_desc_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_generic_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_static_assert_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
r'(error: static assertion failed: |error: static_assert failed .)' + expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
def _split_list(l, num_elems_in_chunk):
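    # Group l into tuples of num_elems_in_chunk consecutive elements, padding the
    # last tuple with None, e.g. _split_list([1, 2, 3], 2) == [(1, 2), (3, None)].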
args = [iter(l)] * num_elems_in_chunk
return list(itertools.zip_longest(*args))
def _get_line_from_diagnostic(diagnostic):
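    # Extract the line number from a diagnostic of the form '<unknown>:LINE:...'.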
matches = re.match('<unknown>:([0-9]*):', diagnostic)
return int(matches.group(1))
def assert_conversion_fails(f):
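    # The body of the decorated function is TMPPy source annotated with the expected
    # diagnostics as trailing comments: exactly one ' # error: <regex>' marker, plus
    # optional ' # note: <regex>' markers. The conversion must fail with a matching
    # error on the annotated line.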
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
actual_source_lines = []
expected_error_regex = None
expected_error_line = None
expected_note_by_line = dict()
for line_index, line in enumerate(tmppy_source.splitlines()):
            error_regex_marker = ' # error: '
            note_regex_marker = ' # note: '
if error_regex_marker in line:
if expected_error_regex:
pytest.fail('Multiple expected errors in the same test are not supported', pytrace=False)
[line, expected_error_regex] = line.split(error_regex_marker)
expected_error_line = line_index + 1
elif note_regex_marker in line:
[line, expected_note_regex] = line.split(note_regex_marker)
expected_note_by_line[line_index + 1] = expected_note_regex
actual_source_lines.append(line)
if not expected_error_regex:
pytest.fail(
textwrap.dedent('''\
assert_conversion_fails was used, but no expected error regex was found.
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir('\n'.join(actual_source_lines), create_identifier_generator())
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected an exception, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1)),
pytrace=False)
# py2tmp diagnostics take up 3 lines each, e.g.:
# <unknown>:2:11: error: Empty lists are not currently supported.
# return []
# ^
py2tmp_diagnostics = _split_list(e.args[0].splitlines(), num_elems_in_chunk=3)
error_diagnostic = py2tmp_diagnostics[0]
expected_error_regex = '<unknown>:[0-9]*:[0-9]*: error: ' + expected_error_regex
if not re.match(expected_error_regex, error_diagnostic[0]):
pytest.fail(
textwrap.dedent('''\
An exception was thrown, but it didn\'t match the expected error regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
matches = re.match('<unknown>:([0-9]*):', error_diagnostic[0])
actual_error_line = int(matches.group(1))
if expected_error_line != actual_error_line:
pytest.fail(
textwrap.dedent('''\
An exception matching the expected regex was thrown, but the error mentioned the wrong line: {actual_error_line} was reported instead of {expected_error_line}
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(actual_error_line=actual_error_line,
expected_error_line=expected_error_line,
expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
actual_note_by_line = {_get_line_from_diagnostic(note[0]): note
for note in py2tmp_diagnostics[1:]}
for expected_note_line, expected_note_regex in expected_note_by_line.items():
actual_note = actual_note_by_line.get(expected_note_line)
if not actual_note:
raise Exception('Expected the note %s on line %s but no note was emitted mentioning this line. Emitted notes: %s' % (
expected_note_regex, expected_note_line, json.dumps(actual_note_by_line, indent=4)))
expected_note_regex = '<unknown>:[0-9]*:[0-9]*: note: ' + expected_note_regex
if not re.match(expected_note_regex, actual_note[0]):
pytest.fail(
textwrap.dedent('''\
A note diagnostic was emitted, but it didn\'t match the expected note regex.
Expected note regex: {expected_note_regex}
Actual note:
{actual_note}
TMPPy source:
{tmppy_source}
''').format(expected_note_regex = expected_note_regex,
actual_note = '\n'.join(actual_note),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
for actual_note_line, actual_note in actual_note_by_line.items():
expected_note = expected_note_by_line.get(actual_note_line)
if not expected_note:
pytest.fail(
textwrap.dedent('''\
Unexpected note:
{actual_note}
TMPPy source:
{tmppy_source}
                ''').format(actual_note = '\n'.join(actual_note),
                            tmppy_source = add_line_numbers(tmppy_source)),
                pytrace=False)
return wrapper
def assert_conversion_fails_with_codegen_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
try:
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
e = None
except ir0.CodegenError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected a codegen error, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
                    {tmppy_ir1}
C++ source:
{cpp_source}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
if not re.match(expected_error_regex, e.args[0]):
pytest.fail(
textwrap.dedent('''\
A codegen error was emitted as expected, but it didn\'t match the expected note regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cpp_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = e.args[0],
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
return wrapper
return eval
def main(file):
code = pytest.main(args = sys.argv + [os.path.realpath(file)])
exit(code)
| true
| true
|
79071923a801566d232e587f0c64e2832498d90a
| 17,588
|
py
|
Python
|
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_params.py
|
IamPeterPan/azure-cli
|
458a7641bf706601d22ee5b5e6435aab7ec95bca
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_params.py
|
IamPeterPan/azure-cli
|
458a7641bf706601d22ee5b5e6435aab7ec95bca
|
[
"MIT"
] | 3
|
2021-03-26T00:25:36.000Z
|
2022-03-29T22:03:55.000Z
|
src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_params.py
|
IamPeterPan/azure-cli
|
458a7641bf706601d22ee5b5e6435aab7ec95bca
|
[
"MIT"
] | 1
|
2020-07-13T22:28:09.000Z
|
2020-07-13T22:28:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import argparse
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.mgmt.containerregistry.v2018_09_01.models import (
PasswordName,
WebhookStatus,
WebhookAction,
PolicyStatus,
RunStatus,
TaskStatus,
BaseImageTriggerType
)
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
BuildTaskStatus,
OsType,
BuildStatus,
BaseImageTriggerType as BuildBaseImageTriggerType
)
from azure.cli.core.commands.parameters import (
resource_group_name_type,
get_location_type,
tags_type,
deployment_name_type,
get_resource_name_completion_list,
quotes,
get_three_state_flag,
get_enum_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._constants import (
STORAGE_RESOURCE_TYPE,
REGISTRY_RESOURCE_TYPE,
WEBHOOK_RESOURCE_TYPE,
REPLICATION_RESOURCE_TYPE,
BUILD_TASK_RESOURCE_TYPE,
BUILD_STEP_RESOURCE_TYPE,
TASK_RESOURCE_TYPE,
CLASSIC_REGISTRY_SKU,
MANAGED_REGISTRY_SKU,
)
from ._validators import (
validate_headers,
validate_build_arg,
validate_secret_build_arg,
validate_arg,
validate_secret_arg,
validate_set,
validate_set_secret
)
image_by_tag_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag'."
)
image_by_tag_or_digest_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag' or digest in the format 'name@digest'."
)
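# Shared argument types, reused below so that --image/-t is defined consistently
# across the acr command tree.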
def load_arguments(self, _): # pylint: disable=too-many-statements
with self.argument_context('acr') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', arg_type=tags_type)
c.argument('registry_name', options_list=['--name', '-n'], help='The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`', completer=get_resource_name_completion_list(REGISTRY_RESOURCE_TYPE), configured_default='acr')
c.argument('storage_account_name', help='Provide the name of an existing storage account if you\'re recreating a container registry over a previous registry created storage account. Only applicable to Classic SKU.', completer=get_resource_name_completion_list(STORAGE_RESOURCE_TYPE))
c.argument('sku', help='The SKU of the container registry', arg_type=get_enum_type(MANAGED_REGISTRY_SKU + CLASSIC_REGISTRY_SKU))
c.argument('admin_enabled', help='Indicates whether the admin user is enabled', arg_type=get_three_state_flag())
c.argument('password_name', help='The name of password to regenerate', arg_type=get_enum_type(PasswordName))
c.argument('username', options_list=['--username', '-u'], help='The username used to log into a container registry')
c.argument('password', options_list=['--password', '-p'], help='The password used to log into a container registry')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('image_names', arg_type=image_by_tag_type, action='append')
c.argument('timeout', type=int, help='The timeout in seconds.')
        c.argument('docker_file_path', options_list=['--file', '-f'], help="The relative path of the docker file to the source code root folder.")
c.argument('no_logs', help="Do not show logs after successfully queuing the build.", action='store_true')
c.argument('no_wait', help="Do not wait for the run to complete and return immediately after queuing the run.", action='store_true')
c.argument('no_format', help="Indicates whether the logs should be displayed in raw format", action='store_true')
c.argument('os_type', options_list=['--os'], help='The operating system type required for the build.', arg_type=get_enum_type(OsType))
with self.argument_context('acr import') as c:
c.argument('source', help="The source identifier in the format '[registry.azurecr.io/]repository[:tag]' or '[registry.azurecr.io/]repository@digest'.")
        c.argument('source_registry', options_list=['--registry', '-r'], help='The name, login server or resource ID of the source container registry.')
c.argument('target_tags', arg_type=image_by_tag_type, action='append')
c.argument('repository', help='The repository name to do a manifest-only copy for images.', action='append')
c.argument('force', help='Overwrite the existing tag of the image to be imported.', action='store_true')
with self.argument_context('acr config content-trust') as c:
c.argument('status', help="Indicates whether content-trust is enabled or disabled.", arg_type=get_enum_type(PolicyStatus))
with self.argument_context('acr repository') as c:
c.argument('repository', help="The name of the repository.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('top', type=int, help='Limit the number of items in the results.')
        c.argument('orderby', help='Order the items in the results. Defaults to alphabetical order of names.', arg_type=get_enum_type(['time_asc', 'time_desc']))
c.argument('detail', help='Show detailed information.', action='store_true')
c.argument('delete_enabled', help='Indicates whether delete operation is allowed.', arg_type=get_three_state_flag())
c.argument('list_enabled', help='Indicates whether this item shows in list operation results.', arg_type=get_three_state_flag())
c.argument('read_enabled', help='Indicates whether read operation is allowed.', arg_type=get_three_state_flag())
c.argument('write_enabled', help='Indicates whether write or delete operation is allowed.', arg_type=get_three_state_flag())
with self.argument_context('acr repository delete') as c:
c.argument('manifest', nargs='?', required=False, const='', default=None, help=argparse.SUPPRESS)
c.argument('tag', help=argparse.SUPPRESS)
with self.argument_context('acr repository untag') as c:
c.argument('image', arg_type=image_by_tag_type)
with self.argument_context('acr create') as c:
c.argument('registry_name', completer=None)
c.argument('deployment_name', arg_type=deployment_name_type, validator=None)
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
with self.argument_context('acr check-name') as c:
c.argument('registry_name', completer=None)
with self.argument_context('acr webhook') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('webhook_name', options_list=['--name', '-n'], help='The name of the webhook', completer=get_resource_name_completion_list(WEBHOOK_RESOURCE_TYPE))
c.argument('uri', help='The service URI for the webhook to post notifications.')
c.argument('headers', nargs='+', help="Space-separated custom headers in 'key[=value]' format that will be added to the webhook notifications. Use {} to clear existing headers.".format(quotes), validator=validate_headers)
c.argument('actions', nargs='+', help='Space-separated list of actions that trigger the webhook to post notifications.', arg_type=get_enum_type(WebhookAction))
c.argument('status', help='Indicates whether the webhook is enabled.', arg_type=get_enum_type(WebhookStatus))
c.argument('scope', help="The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means events for all repositories.")
with self.argument_context('acr webhook create') as c:
c.argument('webhook_name', completer=None)
with self.argument_context('acr replication') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('replication_name', options_list=['--name', '-n'], help='The name of the replication.', completer=get_resource_name_completion_list(REPLICATION_RESOURCE_TYPE))
with self.argument_context('acr replication create') as c:
        c.argument('replication_name', help='The name of the replication. Defaults to the location name.', completer=None)
with self.argument_context('acr run') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('file', options_list=['--file', '-f'], help="The task template/definition file path relative to the source context.")
c.argument('values', help="The task values file path relative to the source context.")
c.argument('set_value', options_list=['--set'], help="Value in 'name[=value]' format.", action='append', validator=validate_set)
with self.argument_context('acr build') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", action='store_true')
c.argument('arg', options_list=['--build-arg'], help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', options_list=['--secret-build-arg'], help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
with self.argument_context('acr build-task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
# build task parameters
c.argument('build_task_name', options_list=['--name', '-n'], help='The name of the build task.', completer=get_resource_name_completion_list(BUILD_TASK_RESOURCE_TYPE))
        c.argument('alias', help='The alternative name for the build task. Defaults to the build task name.')
c.argument('status', help='The current status of build task.', arg_type=get_enum_type(BuildTaskStatus))
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the build.')
c.argument('repository_url', options_list=['--context', '-c'], help="The full URL to the source code repository.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('with_secure_properties', help="Indicates whether the secure properties of a build task should be returned.", action='store_true')
# build step parameters
c.argument('step_name', help='The name of the build step.', completer=get_resource_name_completion_list(BUILD_STEP_RESOURCE_TYPE))
c.argument('branch', help="The source control branch name.")
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('base_image_trigger', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BuildBaseImageTriggerType))
# build parameters
c.argument('top', help='Limit the number of latest builds in the results.')
c.argument('build_id', help='The unique build identifier.')
c.argument('build_status', help='The current status of build.', arg_type=get_enum_type(BuildStatus))
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_archive', help='Indicates whether the build should be archived.', arg_type=get_three_state_flag())
c.argument('build_arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_build_arg)
c.argument('secret_build_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_build_arg)
with self.argument_context('acr task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('task_name', options_list=['--name', '-n'], help='The name of the task.', completer=get_resource_name_completion_list(TASK_RESOURCE_TYPE))
c.argument('status', help='The current status of task.', arg_type=get_enum_type(TaskStatus))
c.argument('with_secure_properties', help="Indicates whether the secure properties of a task should be returned.", action='store_true')
# DockerBuildStep, FileTaskStep parameters
        c.argument('file', options_list=['--file', '-f'], help="The relative path of the task/docker file to the source code root folder. Task files must be suffixed with '.yaml'.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('values', help="The task values/parameters file path relative to the source context.")
# common to DockerBuildStep, FileTaskStep and RunTaskStep
c.argument('context_path', options_list=['--context', '-c'], help="The full URL to the source code repository (Requires '.git' suffix for a github repo).")
c.argument('arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
c.argument('set_value', options_list=['--set'], help="Task value in 'name[=value]' format.", action='append', validator=validate_set)
c.argument('set_secret', help="Secret task value in 'name[=value]' format.", action='append', validator=validate_set_secret)
# Source Trigger parameters
c.argument('source_trigger_name', help="The name of the source trigger.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('branch', help="The source control branch name.")
c.argument('base_image_trigger_name', help="The name of the base image trigger.")
c.argument('base_image_trigger_enabled', help="Indicates whether the base image trigger is enabled.", arg_type=get_three_state_flag())
c.argument('base_image_trigger_type', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BaseImageTriggerType))
# Run related parameters
c.argument('top', help='Limit the number of latest runs in the results.')
c.argument('run_id', help='The unique run identifier.')
c.argument('run_status', help='The current status of run.', arg_type=get_enum_type(RunStatus))
c.argument('no_archive', help='Indicates whether the run should be archived.', arg_type=get_three_state_flag())
# Run agent parameters
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the run.')
with self.argument_context('acr task create') as c:
c.argument('task_name', completer=None)
with self.argument_context('acr build-task create') as c:
c.argument('build_task_name', completer=None)
with self.argument_context('acr helm') as c:
c.argument('resource_group_name', help=argparse.SUPPRESS)
c.argument('repository', help=argparse.SUPPRESS)
c.argument('version', help='The helm chart version.')
with self.argument_context('acr helm show') as c:
c.positional('chart', help='The helm chart name.')
with self.argument_context('acr helm delete') as c:
c.positional('chart', help='The helm chart name.')
c.argument('prov', help='Only delete the provenance file.', action='store_true')
with self.argument_context('acr helm push') as c:
c.positional('chart_package', help="The helm chart package.", completer=FilesCompleter())
c.argument('force', help='Overwrite the existing chart package.', action='store_true')
| 71.495935
| 301
| 0.712702
|
import argparse
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.mgmt.containerregistry.v2018_09_01.models import (
PasswordName,
WebhookStatus,
WebhookAction,
PolicyStatus,
RunStatus,
TaskStatus,
BaseImageTriggerType
)
from azure.mgmt.containerregistry.v2018_02_01_preview.models import (
BuildTaskStatus,
OsType,
BuildStatus,
BaseImageTriggerType as BuildBaseImageTriggerType
)
from azure.cli.core.commands.parameters import (
resource_group_name_type,
get_location_type,
tags_type,
deployment_name_type,
get_resource_name_completion_list,
quotes,
get_three_state_flag,
get_enum_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from ._constants import (
STORAGE_RESOURCE_TYPE,
REGISTRY_RESOURCE_TYPE,
WEBHOOK_RESOURCE_TYPE,
REPLICATION_RESOURCE_TYPE,
BUILD_TASK_RESOURCE_TYPE,
BUILD_STEP_RESOURCE_TYPE,
TASK_RESOURCE_TYPE,
CLASSIC_REGISTRY_SKU,
MANAGED_REGISTRY_SKU,
)
from ._validators import (
validate_headers,
validate_build_arg,
validate_secret_build_arg,
validate_arg,
validate_secret_arg,
validate_set,
validate_set_secret
)
image_by_tag_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag'."
)
image_by_tag_or_digest_type = CLIArgumentType(
options_list=['--image', '-t'],
help="The name of the image. May include a tag in the format 'name:tag' or digest in the format 'name@digest'."
)
def load_arguments(self, _):
with self.argument_context('acr') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx))
c.argument('tags', arg_type=tags_type)
c.argument('registry_name', options_list=['--name', '-n'], help='The name of the container registry. You can configure the default registry name using `az configure --defaults acr=<registry name>`', completer=get_resource_name_completion_list(REGISTRY_RESOURCE_TYPE), configured_default='acr')
c.argument('storage_account_name', help='Provide the name of an existing storage account if you\'re recreating a container registry over a previous registry created storage account. Only applicable to Classic SKU.', completer=get_resource_name_completion_list(STORAGE_RESOURCE_TYPE))
c.argument('sku', help='The SKU of the container registry', arg_type=get_enum_type(MANAGED_REGISTRY_SKU + CLASSIC_REGISTRY_SKU))
c.argument('admin_enabled', help='Indicates whether the admin user is enabled', arg_type=get_three_state_flag())
c.argument('password_name', help='The name of password to regenerate', arg_type=get_enum_type(PasswordName))
c.argument('username', options_list=['--username', '-u'], help='The username used to log into a container registry')
c.argument('password', options_list=['--password', '-p'], help='The password used to log into a container registry')
c.argument('yes', options_list=['--yes', '-y'], help='Do not prompt for confirmation.', action='store_true')
c.argument('image_names', arg_type=image_by_tag_type, action='append')
c.argument('timeout', type=int, help='The timeout in seconds.')
        c.argument('docker_file_path', options_list=['--file', '-f'], help="The relative path of the docker file to the source code root folder.")
c.argument('no_logs', help="Do not show logs after successfully queuing the build.", action='store_true')
c.argument('no_wait', help="Do not wait for the run to complete and return immediately after queuing the run.", action='store_true')
c.argument('no_format', help="Indicates whether the logs should be displayed in raw format", action='store_true')
c.argument('os_type', options_list=['--os'], help='The operating system type required for the build.', arg_type=get_enum_type(OsType))
with self.argument_context('acr import') as c:
c.argument('source', help="The source identifier in the format '[registry.azurecr.io/]repository[:tag]' or '[registry.azurecr.io/]repository@digest'.")
        c.argument('source_registry', options_list=['--registry', '-r'], help='The name, login server or resource ID of the source container registry.')
c.argument('target_tags', arg_type=image_by_tag_type, action='append')
c.argument('repository', help='The repository name to do a manifest-only copy for images.', action='append')
c.argument('force', help='Overwrite the existing tag of the image to be imported.', action='store_true')
with self.argument_context('acr config content-trust') as c:
c.argument('status', help="Indicates whether content-trust is enabled or disabled.", arg_type=get_enum_type(PolicyStatus))
with self.argument_context('acr repository') as c:
c.argument('repository', help="The name of the repository.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('top', type=int, help='Limit the number of items in the results.')
        c.argument('orderby', help='Order the items in the results. Defaults to alphabetical order of names.', arg_type=get_enum_type(['time_asc', 'time_desc']))
c.argument('detail', help='Show detailed information.', action='store_true')
c.argument('delete_enabled', help='Indicates whether delete operation is allowed.', arg_type=get_three_state_flag())
c.argument('list_enabled', help='Indicates whether this item shows in list operation results.', arg_type=get_three_state_flag())
c.argument('read_enabled', help='Indicates whether read operation is allowed.', arg_type=get_three_state_flag())
c.argument('write_enabled', help='Indicates whether write or delete operation is allowed.', arg_type=get_three_state_flag())
with self.argument_context('acr repository delete') as c:
c.argument('manifest', nargs='?', required=False, const='', default=None, help=argparse.SUPPRESS)
c.argument('tag', help=argparse.SUPPRESS)
with self.argument_context('acr repository untag') as c:
c.argument('image', arg_type=image_by_tag_type)
with self.argument_context('acr create') as c:
c.argument('registry_name', completer=None)
c.argument('deployment_name', arg_type=deployment_name_type, validator=None)
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
with self.argument_context('acr check-name') as c:
c.argument('registry_name', completer=None)
with self.argument_context('acr webhook') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('webhook_name', options_list=['--name', '-n'], help='The name of the webhook', completer=get_resource_name_completion_list(WEBHOOK_RESOURCE_TYPE))
c.argument('uri', help='The service URI for the webhook to post notifications.')
c.argument('headers', nargs='+', help="Space-separated custom headers in 'key[=value]' format that will be added to the webhook notifications. Use {} to clear existing headers.".format(quotes), validator=validate_headers)
c.argument('actions', nargs='+', help='Space-separated list of actions that trigger the webhook to post notifications.', arg_type=get_enum_type(WebhookAction))
c.argument('status', help='Indicates whether the webhook is enabled.', arg_type=get_enum_type(WebhookStatus))
c.argument('scope', help="The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means events for all repositories.")
with self.argument_context('acr webhook create') as c:
c.argument('webhook_name', completer=None)
with self.argument_context('acr replication') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('replication_name', options_list=['--name', '-n'], help='The name of the replication.', completer=get_resource_name_completion_list(REPLICATION_RESOURCE_TYPE))
with self.argument_context('acr replication create') as c:
        c.argument('replication_name', help='The name of the replication. Defaults to the location name.', completer=None)
with self.argument_context('acr run') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('file', options_list=['--file', '-f'], help="The task template/definition file path relative to the source context.")
c.argument('values', help="The task values file path relative to the source context.")
c.argument('set_value', options_list=['--set'], help="Value in 'name[=value]' format.", action='append', validator=validate_set)
with self.argument_context('acr build') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.positional('source_location', help="The local source code directory path (e.g., './src') or the URL to a git repository (e.g., 'https://github.com/Azure-Samples/acr-build-helloworld-node.git') or a remote tarball (e.g., 'http://server/context.tar.gz').", completer=FilesCompleter())
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", action='store_true')
c.argument('arg', options_list=['--build-arg'], help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', options_list=['--secret-build-arg'], help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
with self.argument_context('acr build-task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
# build task parameters
c.argument('build_task_name', options_list=['--name', '-n'], help='The name of the build task.', completer=get_resource_name_completion_list(BUILD_TASK_RESOURCE_TYPE))
        c.argument('alias', help='The alternative name for the build task. Defaults to the build task name.')
c.argument('status', help='The current status of build task.', arg_type=get_enum_type(BuildTaskStatus))
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the build.')
c.argument('repository_url', options_list=['--context', '-c'], help="The full URL to the source code repository.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('with_secure_properties', help="Indicates whether the secure properties of a build task should be returned.", action='store_true')
# build step parameters
c.argument('step_name', help='The name of the build step.', completer=get_resource_name_completion_list(BUILD_STEP_RESOURCE_TYPE))
c.argument('branch', help="The source control branch name.")
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('base_image_trigger', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BuildBaseImageTriggerType))
# build parameters
c.argument('top', help='Limit the number of latest builds in the results.')
c.argument('build_id', help='The unique build identifier.')
c.argument('build_status', help='The current status of build.', arg_type=get_enum_type(BuildStatus))
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_archive', help='Indicates whether the build should be archived.', arg_type=get_three_state_flag())
c.argument('build_arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_build_arg)
c.argument('secret_build_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_build_arg)
with self.argument_context('acr task') as c:
c.argument('registry_name', options_list=['--registry', '-r'])
c.argument('task_name', options_list=['--name', '-n'], help='The name of the task.', completer=get_resource_name_completion_list(TASK_RESOURCE_TYPE))
c.argument('status', help='The current status of task.', arg_type=get_enum_type(TaskStatus))
c.argument('with_secure_properties', help="Indicates whether the secure properties of a task should be returned.", action='store_true')
# DockerBuildStep, FileTaskStep parameters
        c.argument('file', options_list=['--file', '-f'], help="The relative path of the task/docker file to the source code root folder. Task files must be suffixed with '.yaml'.")
c.argument('image', arg_type=image_by_tag_or_digest_type)
c.argument('no_push', help="Indicates whether the image built should be pushed to the registry.", arg_type=get_three_state_flag())
c.argument('no_cache', help='Indicates whether the image cache is enabled.', arg_type=get_three_state_flag())
c.argument('values', help="The task values/parameters file path relative to the source context.")
# common to DockerBuildStep, FileTaskStep and RunTaskStep
c.argument('context_path', options_list=['--context', '-c'], help="The full URL to the source code repository (Requires '.git' suffix for a github repo).")
c.argument('arg', help="Build argument in 'name[=value]' format.", action='append', validator=validate_arg)
c.argument('secret_arg', help="Secret build argument in 'name[=value]' format.", action='append', validator=validate_secret_arg)
c.argument('set_value', options_list=['--set'], help="Task value in 'name[=value]' format.", action='append', validator=validate_set)
c.argument('set_secret', help="Secret task value in 'name[=value]' format.", action='append', validator=validate_set_secret)
# Source Trigger parameters
c.argument('source_trigger_name', help="The name of the source trigger.")
c.argument('commit_trigger_enabled', help="Indicates whether the source control commit trigger is enabled.", arg_type=get_three_state_flag())
c.argument('git_access_token', help="The access token used to access the source control provider.")
c.argument('branch', help="The source control branch name.")
c.argument('base_image_trigger_name', help="The name of the base image trigger.")
c.argument('base_image_trigger_enabled', help="Indicates whether the base image trigger is enabled.", arg_type=get_three_state_flag())
c.argument('base_image_trigger_type', help="The type of the auto trigger for base image dependency updates.", arg_type=get_enum_type(BaseImageTriggerType))
# Run related parameters
c.argument('top', help='Limit the number of latest runs in the results.')
c.argument('run_id', help='The unique run identifier.')
c.argument('run_status', help='The current status of run.', arg_type=get_enum_type(RunStatus))
c.argument('no_archive', help='Indicates whether the run should be archived.', arg_type=get_three_state_flag())
# Run agent parameters
c.argument('cpu', type=int, help='The CPU configuration in terms of number of cores required for the run.')
with self.argument_context('acr task create') as c:
c.argument('task_name', completer=None)
with self.argument_context('acr build-task create') as c:
c.argument('build_task_name', completer=None)
with self.argument_context('acr helm') as c:
c.argument('resource_group_name', help=argparse.SUPPRESS)
c.argument('repository', help=argparse.SUPPRESS)
c.argument('version', help='The helm chart version.')
with self.argument_context('acr helm show') as c:
c.positional('chart', help='The helm chart name.')
with self.argument_context('acr helm delete') as c:
c.positional('chart', help='The helm chart name.')
c.argument('prov', help='Only delete the provenance file.', action='store_true')
with self.argument_context('acr helm push') as c:
c.positional('chart_package', help="The helm chart package.", completer=FilesCompleter())
c.argument('force', help='Overwrite the existing chart package.', action='store_true')
| true
| true
|
79071bac861e3f16bc973a0233f8ef4a74035a95
| 97
|
py
|
Python
|
randt/__init__.py
|
pordino/FalcomBot-cogs
|
869371b5e9a9395d84dfa186ddbb0b1f56771975
|
[
"MIT"
] | 9
|
2018-10-12T07:04:29.000Z
|
2021-06-12T03:20:01.000Z
|
randt/__init__.py
|
pordino/FalcomBot-cogs
|
869371b5e9a9395d84dfa186ddbb0b1f56771975
|
[
"MIT"
] | 4
|
2018-10-22T19:43:20.000Z
|
2021-07-21T09:15:43.000Z
|
randt/__init__.py
|
pordino/FalcomBot-cogs
|
869371b5e9a9395d84dfa186ddbb0b1f56771975
|
[
"MIT"
] | 9
|
2018-11-20T14:04:11.000Z
|
2021-09-20T13:21:35.000Z
|
from .randt import RandomizationTools
def setup(bot):
bot.add_cog(RandomizationTools(bot))
| 16.166667
| 40
| 0.773196
|
from .randt import RandomizationTools
def setup(bot):
bot.add_cog(RandomizationTools(bot))
| true
| true
|
79071bd33cb93a8554f6f7e4058d472eab17121b
| 349
|
py
|
Python
|
propel_app/routing.py
|
syz247179876/e_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 7
|
2021-04-10T13:20:56.000Z
|
2022-03-29T15:00:29.000Z
|
propel_app/routing.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 9
|
2021-05-11T03:53:31.000Z
|
2022-03-12T00:58:03.000Z
|
propel_app/routing.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 2
|
2020-11-24T08:59:22.000Z
|
2020-11-24T14:10:59.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2020/8/8 下午4:22
# @Author : 司云中
# @File : routing.py
# @Software: Pycharm
from django.urls import path, re_path
websocket_urlpatterns = [
# 官方解释path可能存在某种bug,用re_path既可以支持正则,也可以支持path路由匹配规则
re_path(r'concern_notice',), # 用户店铺关注,当店主上架新商品的时候进行商品推送
re_path(r'buy_notice',), # 当用户购买商品后,推送购买信息
]
| 23.266667
| 61
| 0.681948
|
from django.urls import path, re_path
websocket_urlpatterns = [
re_path(r'concern_notice',),
re_path(r'buy_notice',),
]
| true
| true
|
79071bee9c5723cb68ba7fed21f1681008610963
| 1,151
|
py
|
Python
|
conf/tests.py
|
dyndeploy-test/timestrap
|
0335836398401910d8cf248d6aebfcf70838e39d
|
[
"BSD-2-Clause"
] | 1
|
2019-01-23T02:17:04.000Z
|
2019-01-23T02:17:04.000Z
|
conf/tests.py
|
usmanakram232/timestrap
|
851bddae883452bbe4987932e95953b71b2a95b7
|
[
"BSD-2-Clause"
] | 4
|
2021-03-09T00:41:40.000Z
|
2022-02-12T05:49:22.000Z
|
conf/tests.py
|
usmanakram232/timestrap
|
851bddae883452bbe4987932e95953b71b2a95b7
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Conf, Site, SitePermission
class ConfTestCase(TestCase):
def test_conf_created(self):
site = Site.objects.create(domain='test.site', name='Test Site')
self.assertIsInstance(site.conf, Conf)
class SitePermissionTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('Test User', 'test@user.com',
'test')
Site.objects.create(domain='test1.site', name='Test Site 1')
Site.objects.create(domain='test2.site', name='Test Site 2')
def test_sitepermission_created(self):
site_permission = SitePermission.objects.create(user=self.user)
self.assertIsInstance(site_permission, SitePermission)
def test_sitepermission_sites_added(self):
site_permission = SitePermission.objects.create(user=self.user)
site_permission.sites.set(Site.objects.all())
site_permission.save()
self.assertQuerysetEqual(site_permission.sites.all(),
map(repr, Site.objects.all()))
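The map(repr, ...) argument matches assertQuerysetEqual's historical repr()-string comparison; on Django 3.2+ the same check can be written more directly, as a sketch inside the same test case:
# Equivalent assertion on Django 3.2+, which compares objects without repr():
self.assertQuerysetEqual(
    site_permission.sites.all(),
    Site.objects.all(),
    ordered=False,  # avoid depending on implicit queryset ordering
)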
| 35.96875
| 74
| 0.67159
|
from django.contrib.auth.models import User
from django.test import TestCase
from .models import Conf, Site, SitePermission
class ConfTestCase(TestCase):
def test_conf_created(self):
site = Site.objects.create(domain='test.site', name='Test Site')
self.assertIsInstance(site.conf, Conf)
class SitePermissionTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('Test User', 'test@user.com',
'test')
Site.objects.create(domain='test1.site', name='Test Site 1')
Site.objects.create(domain='test2.site', name='Test Site 2')
def test_sitepermission_created(self):
site_permission = SitePermission.objects.create(user=self.user)
self.assertIsInstance(site_permission, SitePermission)
def test_sitepermission_sites_added(self):
site_permission = SitePermission.objects.create(user=self.user)
site_permission.sites.set(Site.objects.all())
site_permission.save()
self.assertQuerysetEqual(site_permission.sites.all(),
map(repr, Site.objects.all()))
| true
| true
|
79071c10fd1355f406a1e2cf968a687a0a05f5a8
| 2,272
|
py
|
Python
|
src/oci/network_load_balancer/models/work_request_log_entry_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/network_load_balancer/models/work_request_log_entry_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/network_load_balancer/models/work_request_log_entry_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestLogEntryCollection(object):
"""
Wrapper object for an array of WorkRequestLogEntry objects.
"""
def __init__(self, **kwargs):
"""
Initializes a new WorkRequestLogEntryCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this WorkRequestLogEntryCollection.
:type items: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
self.swagger_types = {
'items': 'list[WorkRequestLogEntry]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
Gets the items of this WorkRequestLogEntryCollection.
An array of WorkRequestLogEntry objects.
:return: The items of this WorkRequestLogEntryCollection.
:rtype: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this WorkRequestLogEntryCollection.
An array of WorkRequestLogEntry objects.
:param items: The items of this WorkRequestLogEntryCollection.
:type: list[oci.network_load_balancer.models.WorkRequestLogEntry]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
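A small usage sketch, assuming the OCI SDK is installed; @init_model_state_from_kwargs lets the constructor populate items directly:
# Hypothetical usage sketch:
from oci.network_load_balancer.models import WorkRequestLogEntry
entry = WorkRequestLogEntry()  # fields are normally filled in by the service
collection = WorkRequestLogEntryCollection(items=[entry])
print(collection.items)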
| 32
| 245
| 0.680458
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestLogEntryCollection(object):
def __init__(self, **kwargs):
self.swagger_types = {
'items': 'list[WorkRequestLogEntry]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
79071c807a3437341d65eb7d59be48b0a5a3ecd5
| 18,062
|
py
|
Python
|
google/cloud/datastore_v1/services/datastore/transports/grpc.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 50
|
2020-03-07T16:55:45.000Z
|
2022-03-25T12:10:12.000Z
|
google/cloud/datastore_v1/services/datastore/transports/grpc.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 161
|
2020-02-07T00:46:20.000Z
|
2022-03-16T20:02:16.000Z
|
google/cloud/datastore_v1/services/datastore/transports/grpc.py
|
LaudateCorpus1/python-datastore
|
b1f955b8d410392174092cb8131673a10ccc33ec
|
[
"Apache-2.0"
] | 28
|
2020-02-07T00:55:36.000Z
|
2022-03-03T06:07:03.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc
from google.cloud.datastore_v1.types import datastore
from .base import DatastoreTransport, DEFAULT_CLIENT_INFO
class DatastoreGrpcTransport(DatastoreTransport):
"""gRPC backend transport for Datastore.
Each RPC normalizes the partition IDs of the keys in its
input entities, and always returns entities with keys with
normalized partition IDs. This applies to all keys and entities,
including those in values, except keys with both an empty path
and an empty or unset partition ID. Normalization of input keys
sets the project ID (if not already set) to the project ID from
the request.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def lookup(self) -> Callable[[datastore.LookupRequest], datastore.LookupResponse]:
r"""Return a callable for the lookup method over gRPC.
Looks up entities by key.
Returns:
Callable[[~.LookupRequest],
~.LookupResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "lookup" not in self._stubs:
self._stubs["lookup"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Lookup",
request_serializer=datastore.LookupRequest.serialize,
response_deserializer=datastore.LookupResponse.deserialize,
)
return self._stubs["lookup"]
@property
def run_query(
self,
) -> Callable[[datastore.RunQueryRequest], datastore.RunQueryResponse]:
r"""Return a callable for the run query method over gRPC.
Queries for entities.
Returns:
Callable[[~.RunQueryRequest],
~.RunQueryResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "run_query" not in self._stubs:
self._stubs["run_query"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/RunQuery",
request_serializer=datastore.RunQueryRequest.serialize,
response_deserializer=datastore.RunQueryResponse.deserialize,
)
return self._stubs["run_query"]
@property
def begin_transaction(
self,
) -> Callable[
[datastore.BeginTransactionRequest], datastore.BeginTransactionResponse
]:
r"""Return a callable for the begin transaction method over gRPC.
Begins a new transaction.
Returns:
Callable[[~.BeginTransactionRequest],
~.BeginTransactionResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "begin_transaction" not in self._stubs:
self._stubs["begin_transaction"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/BeginTransaction",
request_serializer=datastore.BeginTransactionRequest.serialize,
response_deserializer=datastore.BeginTransactionResponse.deserialize,
)
return self._stubs["begin_transaction"]
@property
def commit(self) -> Callable[[datastore.CommitRequest], datastore.CommitResponse]:
r"""Return a callable for the commit method over gRPC.
Commits a transaction, optionally creating, deleting
or modifying some entities.
Returns:
Callable[[~.CommitRequest],
~.CommitResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "commit" not in self._stubs:
self._stubs["commit"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Commit",
request_serializer=datastore.CommitRequest.serialize,
response_deserializer=datastore.CommitResponse.deserialize,
)
return self._stubs["commit"]
@property
def rollback(
self,
) -> Callable[[datastore.RollbackRequest], datastore.RollbackResponse]:
r"""Return a callable for the rollback method over gRPC.
Rolls back a transaction.
Returns:
Callable[[~.RollbackRequest],
~.RollbackResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "rollback" not in self._stubs:
self._stubs["rollback"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Rollback",
request_serializer=datastore.RollbackRequest.serialize,
response_deserializer=datastore.RollbackResponse.deserialize,
)
return self._stubs["rollback"]
@property
def allocate_ids(
self,
) -> Callable[[datastore.AllocateIdsRequest], datastore.AllocateIdsResponse]:
r"""Return a callable for the allocate ids method over gRPC.
Allocates IDs for the given keys, which is useful for
referencing an entity before it is inserted.
Returns:
Callable[[~.AllocateIdsRequest],
~.AllocateIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "allocate_ids" not in self._stubs:
self._stubs["allocate_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/AllocateIds",
request_serializer=datastore.AllocateIdsRequest.serialize,
response_deserializer=datastore.AllocateIdsResponse.deserialize,
)
return self._stubs["allocate_ids"]
@property
def reserve_ids(
self,
) -> Callable[[datastore.ReserveIdsRequest], datastore.ReserveIdsResponse]:
r"""Return a callable for the reserve ids method over gRPC.
        Prevents the supplied keys' IDs from being
        auto-allocated by Cloud Datastore.
Returns:
Callable[[~.ReserveIdsRequest],
~.ReserveIdsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reserve_ids" not in self._stubs:
self._stubs["reserve_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/ReserveIds",
request_serializer=datastore.ReserveIdsRequest.serialize,
response_deserializer=datastore.ReserveIdsResponse.deserialize,
)
return self._stubs["reserve_ids"]
def close(self):
self.grpc_channel.close()
__all__ = ("DatastoreGrpcTransport",)
| 42.800948
| 87
| 0.62972
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import grpc
from google.cloud.datastore_v1.types import datastore
from .base import DatastoreTransport, DEFAULT_CLIENT_INFO
class DatastoreGrpcTransport(DatastoreTransport):
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
return self._grpc_channel
@property
def lookup(self) -> Callable[[datastore.LookupRequest], datastore.LookupResponse]:
if "lookup" not in self._stubs:
self._stubs["lookup"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Lookup",
request_serializer=datastore.LookupRequest.serialize,
response_deserializer=datastore.LookupResponse.deserialize,
)
return self._stubs["lookup"]
@property
def run_query(
self,
) -> Callable[[datastore.RunQueryRequest], datastore.RunQueryResponse]:
if "run_query" not in self._stubs:
self._stubs["run_query"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/RunQuery",
request_serializer=datastore.RunQueryRequest.serialize,
response_deserializer=datastore.RunQueryResponse.deserialize,
)
return self._stubs["run_query"]
@property
def begin_transaction(
self,
) -> Callable[
[datastore.BeginTransactionRequest], datastore.BeginTransactionResponse
]:
if "begin_transaction" not in self._stubs:
self._stubs["begin_transaction"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/BeginTransaction",
request_serializer=datastore.BeginTransactionRequest.serialize,
response_deserializer=datastore.BeginTransactionResponse.deserialize,
)
return self._stubs["begin_transaction"]
@property
def commit(self) -> Callable[[datastore.CommitRequest], datastore.CommitResponse]:
if "commit" not in self._stubs:
self._stubs["commit"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Commit",
request_serializer=datastore.CommitRequest.serialize,
response_deserializer=datastore.CommitResponse.deserialize,
)
return self._stubs["commit"]
@property
def rollback(
self,
) -> Callable[[datastore.RollbackRequest], datastore.RollbackResponse]:
if "rollback" not in self._stubs:
self._stubs["rollback"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/Rollback",
request_serializer=datastore.RollbackRequest.serialize,
response_deserializer=datastore.RollbackResponse.deserialize,
)
return self._stubs["rollback"]
@property
def allocate_ids(
self,
) -> Callable[[datastore.AllocateIdsRequest], datastore.AllocateIdsResponse]:
if "allocate_ids" not in self._stubs:
self._stubs["allocate_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/AllocateIds",
request_serializer=datastore.AllocateIdsRequest.serialize,
response_deserializer=datastore.AllocateIdsResponse.deserialize,
)
return self._stubs["allocate_ids"]
@property
def reserve_ids(
self,
) -> Callable[[datastore.ReserveIdsRequest], datastore.ReserveIdsResponse]:
if "reserve_ids" not in self._stubs:
self._stubs["reserve_ids"] = self.grpc_channel.unary_unary(
"/google.datastore.v1.Datastore/ReserveIds",
request_serializer=datastore.ReserveIdsRequest.serialize,
response_deserializer=datastore.ReserveIdsResponse.deserialize,
)
return self._stubs["reserve_ids"]
def close(self):
self.grpc_channel.close()
__all__ = ("DatastoreGrpcTransport",)
| true
| true
|
79071db7886198fa699378735eefa00c44913e2d
| 8,152
|
py
|
Python
|
env/lib/python3.8/site-packages/sentry_sdk/integrations/asgi.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | null | null | null |
env/lib/python3.8/site-packages/sentry_sdk/integrations/asgi.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | null | null | null |
env/lib/python3.8/site-packages/sentry_sdk/integrations/asgi.py
|
crimergio/linux_test
|
5e688a06884ab10b4eaaad10a5d0df417a1c9b31
|
[
"CC-BY-4.0"
] | null | null | null |
"""
An ASGI middleware.
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
"""
import asyncio
import inspect
import urllib
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.utils import (
ContextVar,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
)
from sentry_sdk.tracing import Transaction
if MYPY:
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable
from typing_extensions import Literal
from sentry_sdk._types import Event, Hint
_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
def _capture_exception(hub, exc):
# type: (Hub, Any) -> None
# Check client here as it might have been unset while streaming response
if hub.client is not None:
event, hint = event_from_exception(
exc,
client_options=hub.client.options,
mechanism={"type": "asgi", "handled": False},
)
hub.capture_event(event, hint=hint)
def _looks_like_asgi3(app):
# type: (Any) -> bool
"""
Try to figure out if an application object supports ASGI3.
This is how uvicorn figures out the application version as well.
"""
if inspect.isclass(app):
return hasattr(app, "__await__")
elif inspect.isfunction(app):
return asyncio.iscoroutinefunction(app)
else:
call = getattr(app, "__call__", None) # noqa
return asyncio.iscoroutinefunction(call)
class SentryAsgiMiddleware:
__slots__ = ("app", "__call__")
def __init__(self, app, unsafe_context_data=False):
# type: (Any, bool) -> None
"""
        Instrument an ASGI application with Sentry. Attaches HTTP/websocket
        request data to the events it sends and provides basic handling for
        exceptions bubbling up through the middleware.
:param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
"""
if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
raise RuntimeError(
"The ASGI middleware for Sentry requires Python 3.7+ "
"or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
self.app = app
if _looks_like_asgi3(app):
self.__call__ = self._run_asgi3 # type: Callable[..., Any]
else:
self.__call__ = self._run_asgi2
def _run_asgi2(self, scope):
# type: (Any) -> Any
async def inner(receive, send):
# type: (Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope)(receive, send))
return inner
async def _run_asgi3(self, scope, receive, send):
# type: (Any, Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
if _asgi_middleware_applied.get(False):
return await callback()
_asgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with hub:
with hub.configure_scope() as sentry_scope:
sentry_scope.clear_breadcrumbs()
sentry_scope._name = "asgi"
processor = partial(self.event_processor, asgi_scope=scope)
sentry_scope.add_event_processor(processor)
ty = scope["type"]
if ty in ("http", "websocket"):
transaction = Transaction.continue_from_headers(
dict(scope["headers"]),
op="{}.server".format(ty),
)
else:
transaction = Transaction(op="asgi.server")
transaction.name = _DEFAULT_TRANSACTION_NAME
transaction.set_tag("asgi.type", ty)
with hub.start_transaction(transaction):
# XXX: Would be cool to have correct span status, but we
# would have to wrap send(). That is a bit hard to do with
# the current abstraction over ASGI 2/3.
try:
return await callback()
except Exception as exc:
_capture_exception(hub, exc)
raise exc from None
finally:
_asgi_middleware_applied.set(False)
def event_processor(self, event, hint, asgi_scope):
# type: (Event, Hint, Any) -> Optional[Event]
request_info = event.get("request", {})
ty = asgi_scope["type"]
if ty in ("http", "websocket"):
request_info["method"] = asgi_scope.get("method")
request_info["headers"] = headers = _filter_headers(
self._get_headers(asgi_scope)
)
request_info["query_string"] = self._get_query(asgi_scope)
request_info["url"] = self._get_url(
asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
)
client = asgi_scope.get("client")
if client and _should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": client[0]}
if (
event.get("transaction", _DEFAULT_TRANSACTION_NAME)
== _DEFAULT_TRANSACTION_NAME
):
endpoint = asgi_scope.get("endpoint")
# Webframeworks like Starlette mutate the ASGI env once routing is
# done, which is sometime after the request has started. If we have
# an endpoint, overwrite our generic transaction name.
if endpoint:
event["transaction"] = transaction_from_function(endpoint)
event["request"] = request_info
return event
# Helper functions for extracting request data.
#
# Note: Those functions are not public API. If you want to mutate request
# data to your liking it's recommended to use the `before_send` callback
# for that.
def _get_url(self, scope, default_scheme, host):
# type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str
"""
Extract URL from the ASGI scope, without also including the querystring.
"""
scheme = scope.get("scheme", default_scheme)
server = scope.get("server", None)
path = scope.get("root_path", "") + scope.get("path", "")
if host:
return "%s://%s%s" % (scheme, host, path)
if server is not None:
host, port = server
default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
if port != default_port:
return "%s://%s:%s%s" % (scheme, host, port, path)
return "%s://%s%s" % (scheme, host, path)
return path
def _get_query(self, scope):
# type: (Any) -> Any
"""
Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
"""
qs = scope.get("query_string")
if not qs:
return None
return urllib.parse.unquote(qs.decode("latin-1"))
def _get_headers(self, scope):
# type: (Any) -> Dict[str, str]
"""
Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
"""
headers = {} # type: Dict[str, str]
for raw_key, raw_value in scope["headers"]:
key = raw_key.decode("latin-1")
value = raw_value.decode("latin-1")
if key in headers:
headers[key] = headers[key] + ", " + value
else:
headers[key] = value
return headers
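A minimal wrapping sketch, assuming the SDK has been initialized and app is any ASGI 2 or 3 application (the middleware picks the calling convention via _looks_like_asgi3):
# Hypothetical usage sketch; the DSN below is a placeholder.
import sentry_sdk
sentry_sdk.init(dsn="https://public@example.ingest.sentry.io/0")
app = SentryAsgiMiddleware(app)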
| 34.837607
| 161
| 0.59421
|
import asyncio
import inspect
import urllib
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.utils import (
ContextVar,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
)
from sentry_sdk.tracing import Transaction
if MYPY:
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable
from typing_extensions import Literal
from sentry_sdk._types import Event, Hint
_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
def _capture_exception(hub, exc):
if hub.client is not None:
event, hint = event_from_exception(
exc,
client_options=hub.client.options,
mechanism={"type": "asgi", "handled": False},
)
hub.capture_event(event, hint=hint)
def _looks_like_asgi3(app):
if inspect.isclass(app):
return hasattr(app, "__await__")
elif inspect.isfunction(app):
return asyncio.iscoroutinefunction(app)
else:
call = getattr(app, "__call__", None)
return asyncio.iscoroutinefunction(call)
class SentryAsgiMiddleware:
__slots__ = ("app", "__call__")
def __init__(self, app, unsafe_context_data=False):
if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
raise RuntimeError(
"The ASGI middleware for Sentry requires Python 3.7+ "
"or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
self.app = app
if _looks_like_asgi3(app):
self.__call__ = self._run_asgi3 # type: Callable[..., Any]
else:
self.__call__ = self._run_asgi2
def _run_asgi2(self, scope):
# type: (Any) -> Any
async def inner(receive, send):
# type: (Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope)(receive, send))
return inner
async def _run_asgi3(self, scope, receive, send):
# type: (Any, Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
if _asgi_middleware_applied.get(False):
return await callback()
_asgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with hub:
with hub.configure_scope() as sentry_scope:
sentry_scope.clear_breadcrumbs()
sentry_scope._name = "asgi"
processor = partial(self.event_processor, asgi_scope=scope)
sentry_scope.add_event_processor(processor)
ty = scope["type"]
if ty in ("http", "websocket"):
transaction = Transaction.continue_from_headers(
dict(scope["headers"]),
op="{}.server".format(ty),
)
else:
transaction = Transaction(op="asgi.server")
transaction.name = _DEFAULT_TRANSACTION_NAME
transaction.set_tag("asgi.type", ty)
with hub.start_transaction(transaction):
# XXX: Would be cool to have correct span status, but we
# would have to wrap send(). That is a bit hard to do with
# the current abstraction over ASGI 2/3.
try:
return await callback()
except Exception as exc:
_capture_exception(hub, exc)
raise exc from None
finally:
_asgi_middleware_applied.set(False)
def event_processor(self, event, hint, asgi_scope):
# type: (Event, Hint, Any) -> Optional[Event]
request_info = event.get("request", {})
ty = asgi_scope["type"]
if ty in ("http", "websocket"):
request_info["method"] = asgi_scope.get("method")
request_info["headers"] = headers = _filter_headers(
self._get_headers(asgi_scope)
)
request_info["query_string"] = self._get_query(asgi_scope)
request_info["url"] = self._get_url(
asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
)
client = asgi_scope.get("client")
if client and _should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": client[0]}
if (
event.get("transaction", _DEFAULT_TRANSACTION_NAME)
== _DEFAULT_TRANSACTION_NAME
):
endpoint = asgi_scope.get("endpoint")
# Webframeworks like Starlette mutate the ASGI env once routing is
# done, which is sometime after the request has started. If we have
# an endpoint, overwrite our generic transaction name.
if endpoint:
event["transaction"] = transaction_from_function(endpoint)
event["request"] = request_info
return event
# Helper functions for extracting request data.
#
# Note: Those functions are not public API. If you want to mutate request
# data to your liking it's recommended to use the `before_send` callback
def _get_url(self, scope, default_scheme, host):
scheme = scope.get("scheme", default_scheme)
server = scope.get("server", None)
path = scope.get("root_path", "") + scope.get("path", "")
if host:
return "%s://%s%s" % (scheme, host, path)
if server is not None:
host, port = server
default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
if port != default_port:
return "%s://%s:%s%s" % (scheme, host, port, path)
return "%s://%s%s" % (scheme, host, path)
return path
def _get_query(self, scope):
qs = scope.get("query_string")
if not qs:
return None
return urllib.parse.unquote(qs.decode("latin-1"))
def _get_headers(self, scope):
headers = {}
for raw_key, raw_value in scope["headers"]:
key = raw_key.decode("latin-1")
value = raw_value.decode("latin-1")
if key in headers:
headers[key] = headers[key] + ", " + value
else:
headers[key] = value
return headers
| true
| true
|
79071e08338ccf76b011c1ef1b95723dd45f0284
| 4,434
|
py
|
Python
|
rpiweather/server.py
|
wbkang/rpi-repo
|
fc2b770f99cc2405fbf6855f9f961c4f6aed99cb
|
[
"MIT"
] | null | null | null |
rpiweather/server.py
|
wbkang/rpi-repo
|
fc2b770f99cc2405fbf6855f9f961c4f6aed99cb
|
[
"MIT"
] | null | null | null |
rpiweather/server.py
|
wbkang/rpi-repo
|
fc2b770f99cc2405fbf6855f9f961c4f6aed99cb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
import pandas as pd
import numpy as np
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
from rpiweather import temphumid
from rpiweather import temppressure
from rpiweather import data
from rpiweather import outside_weather
from rpiweather import dust
temppressure.start_recording()
temphumid.start_recording()
outside_weather.start_recording()
dust.start_recording()
app = Flask("rpiweather")
def format_timestamps(series):
local_tz = get_localzone()
return list(
str(dt.tz_localize("UTC").tz_convert(local_tz)) for dt in series
)
@app.route("/")
def index():
lookbehind = int(request.args.get('lookbehind', 24))
bigarray = data.get_recent_datapoints(lookbehind)
logger.info("Total datapoint count: %d" % len(bigarray))
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
agg_interval = "15T" if lookbehind < 168 else "1H" if lookbehind < 5040 else "1D"
df2 = df.pivot(columns='type', values='value').resample(agg_interval).mean()
temp_df = df2['temperature'].dropna()
temp_values = {
'x': format_timestamps(temp_df.index),
'y': list(temp_df),
'name': 'Temperature',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)'
}
}
outside_temp_df = df2['outside_temperature'].dropna()
ot_values = {
'x': format_timestamps(outside_temp_df.index),
'y': list(outside_temp_df),
'name': 'Temperature Outside',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)',
'dash': 'longdash'
}
}
pres_df = df2['pressure'].dropna()
pressure_values = {
'x': format_timestamps(pres_df.index),
'y': list(pres_df),
'name': 'Pressure',
'type': 'line',
'yaxis': 'y2',
'line': {
'dash': 'dot',
'color': 'rgb(151,138,155)'
}
}
hum_df = df2['humidity'].dropna()
humidity_values = {
'x': format_timestamps(hum_df.index),
'y': list(hum_df),
'name': 'Humidity',
'type': 'scatter',
'fill': 'tozeroy',
'yaxis': 'y3',
'marker': {
'color': 'rgb(66,131,244)'
}
}
dust_df = df2['dust'].dropna()
dust_values = {
'x': format_timestamps(dust_df.index),
'y': list(dust_df),
'name': 'Dust level',
'type': 'line',
'yaxis': 'y4',
'line': {
'dash': 'dot',
'color': 'rgb(224, 205, 31)'
}
}
chart_data = [
temp_values, pressure_values, humidity_values, ot_values, dust_values
]
#import pdb; pdb.set_trace()
lookbehind_options = [(24, "1d"),
(24*7, "1w"),
(24*7*30, "30d")]
return render_template("index.html",
weather_data=chart_data,
lookbehind_options=lookbehind_options,
lookbehind=lookbehind)
def make_agg_df(rec):
df = pd.DataFrame.from_records(rec, index="time")
df.index = pd.to_datetime(df.index, unit="s")
return df.resample("T").mean()
def magic():
df_tp = make_agg_df(temppressure.get_records())
df_th = make_agg_df(temphumid.get_records())
df_th = df_th.rename(columns={'temp': 'bad_temp'})
total_view = pd.concat([df_tp, df_th], axis=1)
return total_view
#import IPython
# IPython.embed()
if False:
bigarray = data.get_recent_datapoints()
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
df2 = df.pivot(columns='type', values='value').resample("5T").mean()
temp_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['temperature']
))
pressure_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['pressure']
))
humidity_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['humidity']
))
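The module defines the Flask app but never runs it; a plausible local entry point, assuming no external WSGI server in front, would be:
# Hypothetical entry point, not in the original file:
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)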
| 27.886792
| 99
| 0.58525
|
import RPi.GPIO as GPIO
import time
import threading
import logging
import pandas as pd
import numpy as np
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
from rpiweather import temphumid
from rpiweather import temppressure
from rpiweather import data
from rpiweather import outside_weather
from rpiweather import dust
temppressure.start_recording()
temphumid.start_recording()
outside_weather.start_recording()
dust.start_recording()
app = Flask("rpiweather")
def format_timestamps(series):
local_tz = get_localzone()
return list(
str(dt.tz_localize("UTC").tz_convert(local_tz)) for dt in series
)
@app.route("/")
def index():
lookbehind = int(request.args.get('lookbehind', 24))
bigarray = data.get_recent_datapoints(lookbehind)
logger.info("Total datapoint count: %d" % len(bigarray))
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
agg_interval = "15T" if lookbehind < 168 else "1H" if lookbehind < 5040 else "1D"
df2 = df.pivot(columns='type', values='value').resample(agg_interval).mean()
temp_df = df2['temperature'].dropna()
temp_values = {
'x': format_timestamps(temp_df.index),
'y': list(temp_df),
'name': 'Temperature',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)'
}
}
outside_temp_df = df2['outside_temperature'].dropna()
ot_values = {
'x': format_timestamps(outside_temp_df.index),
'y': list(outside_temp_df),
'name': 'Temperature Outside',
'type': 'line',
'line': {
'color': 'rgb(244, 66, 98)',
'dash': 'longdash'
}
}
pres_df = df2['pressure'].dropna()
pressure_values = {
'x': format_timestamps(pres_df.index),
'y': list(pres_df),
'name': 'Pressure',
'type': 'line',
'yaxis': 'y2',
'line': {
'dash': 'dot',
'color': 'rgb(151,138,155)'
}
}
hum_df = df2['humidity'].dropna()
humidity_values = {
'x': format_timestamps(hum_df.index),
'y': list(hum_df),
'name': 'Humidity',
'type': 'scatter',
'fill': 'tozeroy',
'yaxis': 'y3',
'marker': {
'color': 'rgb(66,131,244)'
}
}
dust_df = df2['dust'].dropna()
dust_values = {
'x': format_timestamps(dust_df.index),
'y': list(dust_df),
'name': 'Dust level',
'type': 'line',
'yaxis': 'y4',
'line': {
'dash': 'dot',
'color': 'rgb(224, 205, 31)'
}
}
chart_data = [
temp_values, pressure_values, humidity_values, ot_values, dust_values
]
lookbehind_options = [(24, "1d"),
(24*7, "1w"),
(24*7*30, "30d")]
return render_template("index.html",
weather_data=chart_data,
lookbehind_options=lookbehind_options,
lookbehind=lookbehind)
def make_agg_df(rec):
df = pd.DataFrame.from_records(rec, index="time")
df.index = pd.to_datetime(df.index, unit="s")
return df.resample("T").mean()
def magic():
df_tp = make_agg_df(temppressure.get_records())
df_th = make_agg_df(temphumid.get_records())
df_th = df_th.rename(columns={'temp': 'bad_temp'})
total_view = pd.concat([df_tp, df_th], axis=1)
return total_view
if False:
bigarray = data.get_recent_datapoints()
df = pd.DataFrame(bigarray, columns=['time', 'type', 'value'])
df['time'] = pd.to_datetime(df['time'])
df = df.set_index('time')
df2 = df.pivot(columns='type', values='value').resample("5T").mean()
temp_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['temperature']
))
pressure_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['pressure']
))
humidity_values = list(zip(
(dt.timestamp() for dt in df2.index),
df2['humidity']
))
| true
| true
|
79071e3b22da102021d04ee58c22ca5f558e2f7b
| 11,178
|
py
|
Python
|
helpers.py
|
vg2691994/mock_frb_injection_results
|
a4747e5ef38ed2171af22e40816bf75afc7e192d
|
[
"MIT"
] | null | null | null |
helpers.py
|
vg2691994/mock_frb_injection_results
|
a4747e5ef38ed2171af22e40816bf75afc7e192d
|
[
"MIT"
] | null | null | null |
helpers.py
|
vg2691994/mock_frb_injection_results
|
a4747e5ef38ed2171af22e40816bf75afc7e192d
|
[
"MIT"
] | null | null | null |
#!/home/observer/miniconda2/bin/python
import numpy as N
import sys, os
import logging as L
import subprocess as S
from collections import namedtuple
from sigpyproc.Readers import FilReader as F
sys.path.append("/home/vgupta/Codes/Fake_FRBs/")
from Furby_reader import Furby_reader
class FileNotFound(Exception):
pass
class Observation():
def __init__(self, utc, cfg_file = "/home/vgupta/resources/observations.cfg"):
self.utc = utc
self.cfg_file = cfg_file
self.read_conf()
self.get_results_dir()
self.get_archives_dir()
self.is_failed = self.if_failed()
self.read_info()
self.processed_offline()
self.annotation = self.read_annotation()
def __str__(self):
return self.utc
def __repr__(self):
return self.utc
def read_annotation(self):
        afile = os.path.join(self.results_dir, self.utc, "obs.txt")  # match the per-UTC layout used elsewhere
if not os.path.exists(afile):
return None
with open(afile, 'r') as f:
return f.read()
def read_conf(self):
if not os.path.exists(self.cfg_file):
raise Exception("Cannot find observation configuration file - {0}".format(self.cfg_file))
#raise FileNotFound("Cannot find observation configuration file - {0}".format(self.cfg_file))
conf_tmp = {}
with open(self.cfg_file) as c:
lines = c.readlines()
for line in lines:
if (line.startswith("#") or line == "" or line == "\n"):
continue
key = line.strip().split()[0].strip()
val = line.strip().split()[1].strip()
val = self.check_type(val)
conf_tmp[key] = val
tmp = namedtuple("CONF", conf_tmp.keys())
self.conf = tmp(*conf_tmp.values())
def get_results_dir(self):
path1 = os.path.join(self.conf.results_dir, self.utc)
path2 = os.path.join(self.conf.old_results_dir, self.utc)
if os.path.isdir(path1):
self.results_dir = self.conf.results_dir
elif os.path.isdir(path2):
self.results_dir = self.conf.old_results_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old results. Neither {1} nor {2} exists".format(self.utc, path1, path2))
def get_archives_dir(self):
path1 = os.path.join(self.conf.archives_dir, self.utc)
path2 = os.path.join(self.conf.old_archives_dir, self.utc)
if os.path.isdir(path1):
self.archives_dir = self.conf.archives_dir
elif os.path.isdir(path2):
self.archives_dir = self.conf.old_archives_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old archives".format(self.utc))
def processed_offline(self):
self.offline_cand_file = os.path.join(self.archives_dir, self.utc, self.conf.offline_output_dir, self.conf.offline_output_file)
        # NB: this assignment shadows the method of the same name after the first call in __init__
        self.processed_offline = os.path.exists(self.offline_cand_file) and not self.is_failed
def read_header(self):
if self.is_failed:
self.header = None
return
self.header_file = os.path.join(self.results_dir, self.utc, "FB", self.conf.header_file)
if not os.path.exists(self.header_file):
raise Exception("Header file({0}) does not exist".format(self.header_file))
with open(self.header_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
key = line.split()[0].strip()
val = line.split()[1].strip()
cval = self.check_type(val)
if key.startswith("FURBY"):
cval = str(val)
hdr_tmp[key] = cval
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("HEADER", keys)
self.header = tmp(*values)
self.tres = self.header.TSAMP * 1e-6
return self.header
def read_info(self):
self.obs_info_file = os.path.join(self.results_dir, self.utc, "obs.info")
if not os.path.exists(self.obs_info_file):
raise Exception("obs.info file({0}) does not exist".format(self.obs_info_file))
with open(self.obs_info_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
if line.startswith("#") or line == "" or line == "\n":
continue
key = line.split()[0].strip()
val = line.split()[1].strip()
            val = self.check_type(val)
            if key == "INT" and self.is_failed:
                val = 0  # zero the integration time for failed observations before storing it
            hdr_tmp[key] = val
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("INFO", keys)
self.info = tmp(*values)
#Getting Tobs-----------------
filterbank_name = self.utc + ".fil"
filterbank_file = os.path.join(self.archives_dir, self.utc, "FB/BEAM_001/", filterbank_name)
if os.path.exists(filterbank_file):
filt_header = F(filterbank_file).header
self.tobs = filt_header.tobs
if self.info.INT > self.tobs:
self.tobs = self.info.INT
else:
self.tobs = self.info.INT
#-----------------------------
return self.info
def check_type(self, val):
try:
ans=int(val)
return ans
except ValueError:
try:
ans=float(val)
return ans
except ValueError:
if val.lower()=="false":
return False
elif val.lower()=="true":
return True
else:
return val
def if_processing(self):
processing_file = os.path.join(self.results_dir, self.utc, "obs.processing")
return os.path.exists(processing_file)
def if_failed(self):
obs_failed_file = os.path.join(self.results_dir, self.utc, "obs.failed")
return os.path.exists(obs_failed_file)
def read_furby_params(self):
if self.is_failed:
self.inj_furbys = -1
return
if (self.info.MB_ENABLED or self.info.CORR_ENABLED):
self.inj_furbys = -1
else:
self.read_header()
try:
self.inj_furbys = self.header.INJECTED_FURBYS
except AttributeError as e:
#log.warn("Could not find INJECTED_FURBYS in the header file for UTC: {0}".format(self.utc))
#log.warn("Assuming no furby injection happened in this observation ({0})".format(self.utc))
self.inj_furbys = 0
else:
if self.inj_furbys > 0:
self.furby_beams = self.header.FURBY_BEAMS.strip(",")
self.furby_ids = self.header.FURBY_IDS.strip(",")
self.furby_tstamps = self.header.FURBY_TSTAMPS.strip(",")
#log.debug("Found: injected_furbys: {0}, furby_ids: {1}, furby_beams: {2}, furby_tstamps: {3}".format(self.inj_furbys, self.furby_ids, self.furby_beams, self.furby_tstamps))
def split_and_filter_furby_params(self):
if self.inj_furbys < 1:
raise ValueError("No furbies to split")
f_ids = N.array(self.furby_ids.split(","))
f_beams = N.array(self.furby_beams.split(","))
f_tstamps = N.array(self.furby_tstamps.split(","))
f_ids = f_ids[N.where(f_ids!='')]
f_beams = f_beams[N.where(f_beams!='')]
f_tstamps = f_tstamps[N.where(f_tstamps!='')]
test = N.array([len(f_ids), len(f_beams), len(f_tstamps)])
if N.any(test-self.inj_furbys):
raise ValueError("Incorrect number of furby params, observation should have failed")
self.furbies = []
self.dropped_furbies = []
for i in range(self.inj_furbys):
furby = Furby(f_ids[i], db = os.path.join(self.archives_dir, self.utc, "Furbys"))
furby.i_beam = int(f_beams[i])
furby.i_tstamp = float(f_tstamps[i])
furby.calc_times()
if (self.check_if_dropped(furby)):
self.dropped_furbies.append(furby)
else:
self.furbies.append(furby)
def check_if_dropped(self, furby):
if not hasattr(furby, 'header'):
furby.read_fheader()
if not hasattr(furby, 'length'):
furby.calc_times()
if furby.i_tstamp < furby.length/2:
return True
if (furby.i_tstamp - furby.length/2) > self.tobs:
return True
all_furby_tstamps = N.array([float(i.i_tstamp) for i in self.furbies])
diff = furby.i_tstamp - all_furby_tstamps
if N.any((diff < (furby.length + 512*self.tres)) & (diff > 0)):
return True
return False
#----------------------------------------------------------------------------------------#
class Furby(Furby_reader):
def __init__(self, ID, db = "/home/dada/furby_database"):
self.ID = ID
self.name = "furby_"+ID
self.DB = db
self.file = os.path.join(self.DB, self.name)
self.i_beam = None
self.i_tstamp = None
self.i_snr = None
def __repr__(self):
return str(self.ID)
def read_fheader(self):
#self.header = self.read_header(self.file)
self.read_header(self.file)
def calc_times(self):
log = L.getLogger("furby_manager")
if not hasattr(self, 'header'):
self.read_fheader()
chw = (self.header.FTOP - self.header.FBOTTOM) / self.header.NCHAN
f_chtop = self.header.FTOP - chw/2
f_chmid = f_chtop - (self.header.NCHAN/2 * chw)
f_chbottom = self.header.FBOTTOM + chw/2
delay_to_top = 4.14881 * 1e6 * self.header.DM * ( f_chtop**(-2) - f_chmid**(-2) ) *1e-3 #in s
delay_to_bottom = 4.14881 * 1e6 * self.header.DM * ( f_chbottom**(-2) - f_chmid**(-2) ) *1e-3 #in s
self.s_time = self.i_tstamp + delay_to_top
self.e_time = self.i_tstamp + delay_to_bottom
self.c_time = self.i_tstamp
self.length = self.header.NSAMPS * self.header.TSAMP * 1e-6
#---------------------------------------------------------------------------------------#
def list_UTCs_from(start_utc):
#Note to someone editing this in future: Keep in mind that other scripts depend upon that fact that this function returns the list of UTCs in correctly sorted order. Do not change that, even if that costs speed. Or make sure that the scripts using this can be edited accordingly.
start = Observation(start_utc)
cmd = "ls -1d "+start.results_dir+"/202* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
utcs = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n")
#VG: 02/05/2020 -- disabling the section below -- It doesn't work, and I don't have a quick fix either.
'''
if start.results_dir == start.conf.old_results_dir:
#Also append utcs from the new results directory
cmd = "ls -1d "+conf.results_dir+"/20* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
utcs.extend(S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n"))
'''
if len(utcs) == 0:
raise Exception("Given start UTC ({}) not found in {}".format(start_utc, start.results_dir))
return utcs
def list_UTCs_until(utc):
check = Observation(utc)
start_utc = get_first_UTC()
UTCs_from_start = list_UTCs_from(start_utc)
#Assume that list_UTCs_from() returns UTCs sorted in correct order, which it should.
end_utc = utc
    index = UTCs_from_start.index(end_utc)  # it is a plain list; N.where() against a list would silently fail
UTCs_until = UTCs_from_start[:index+1]
return UTCs_until
def list_UTCs_after(utc):
    inclusive_utcs = list_UTCs_from(utc)
return inclusive_utcs[1:]
def get_latest_UTC():
cmd = "ls -1d -rt "+conf.results_dir+"/20* | tail -1 | awk -F/ '{print $5}'"
utc = S.Popen(cmd, shell=True, stdout=S.PIPE).communcate()[0].strip()
return utc
def get_first_UTC():
'''
Returns the first UTC recorded by Molonglo after the disk crash in October 2017
'''
return "2017-10-31-08:49:32"
| 34.079268
| 281
| 0.643049
|
import numpy as N
import sys, os
import logging as L
import subprocess as S
from collections import namedtuple
from sigpyproc.Readers import FilReader as F
sys.path.append("/home/vgupta/Codes/Fake_FRBs/")
from Furby_reader import Furby_reader
class FileNotFound(Exception):
pass
class Observation():
def __init__(self, utc, cfg_file = "/home/vgupta/resources/observations.cfg"):
self.utc = utc
self.cfg_file = cfg_file
self.read_conf()
self.get_results_dir()
self.get_archives_dir()
self.is_failed = self.if_failed()
self.read_info()
self.processed_offline()
self.annotation = self.read_annotation()
def __str__(self):
return self.utc
def __repr__(self):
return self.utc
def read_annotation(self):
afile = os.path.join(self.results_dir, "obs.txt")
if not os.path.exists(afile):
return None
with open(afile, 'r') as f:
return f.read()
def read_conf(self):
if not os.path.exists(self.cfg_file):
raise Exception("Cannot find observation configuration file - {0}".format(self.cfg_file))
conf_tmp = {}
with open(self.cfg_file) as c:
lines = c.readlines()
for line in lines:
if (line.startswith("#") or line == "" or line == "\n"):
continue
key = line.strip().split()[0].strip()
val = line.strip().split()[1].strip()
val = self.check_type(val)
conf_tmp[key] = val
tmp = namedtuple("CONF", conf_tmp.keys())
self.conf = tmp(*conf_tmp.values())
def get_results_dir(self):
path1 = os.path.join(self.conf.results_dir, self.utc)
path2 = os.path.join(self.conf.old_results_dir, self.utc)
if os.path.isdir(path1):
self.results_dir = self.conf.results_dir
elif os.path.isdir(path2):
self.results_dir = self.conf.old_results_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old results. Neither {1} nor {2} exists".format(self.utc, path1, path2))
def get_archives_dir(self):
path1 = os.path.join(self.conf.archives_dir, self.utc)
path2 = os.path.join(self.conf.old_archives_dir, self.utc)
if os.path.isdir(path1):
self.archives_dir = self.conf.archives_dir
elif os.path.isdir(path2):
self.archives_dir = self.conf.old_archives_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old archives".format(self.utc))
def processed_offline(self):
self.offline_cand_file = os.path.join(self.archives_dir, self.utc, self.conf.offline_output_dir, self.conf.offline_output_file)
self.processed_offline = os.path.exists(self.offline_cand_file) and not self.is_failed
def read_header(self):
if self.is_failed:
self.header = None
return
self.header_file = os.path.join(self.results_dir, self.utc, "FB", self.conf.header_file)
if not os.path.exists(self.header_file):
raise Exception("Header file({0}) does not exist".format(self.header_file))
with open(self.header_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
key = line.split()[0].strip()
val = line.split()[1].strip()
cval = self.check_type(val)
if key.startswith("FURBY"):
cval = str(val)
hdr_tmp[key] = cval
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("HEADER", keys)
self.header = tmp(*values)
self.tres = self.header.TSAMP * 1e-6
return self.header
def read_info(self):
self.obs_info_file = os.path.join(self.results_dir, self.utc, "obs.info")
if not os.path.exists(self.obs_info_file):
raise Exception("obs.info file({0}) does not exist".format(self.obs_info_file))
with open(self.obs_info_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
if line.startswith("#") or line == "" or line == "\n":
continue
key = line.split()[0].strip()
val = line.split()[1].strip()
val = self.check_type(val)
hdr_tmp[key] = val
if key=="INT" and self.is_failed:
val = 0
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("INFO", keys)
self.info = tmp(*values)
filterbank_name = self.utc + ".fil"
filterbank_file = os.path.join(self.archives_dir, self.utc, "FB/BEAM_001/", filterbank_name)
if os.path.exists(filterbank_file):
filt_header = F(filterbank_file).header
self.tobs = filt_header.tobs
if self.info.INT > self.tobs:
self.tobs = self.info.INT
else:
self.tobs = self.info.INT
return self.info
def check_type(self, val):
try:
ans=int(val)
return ans
except ValueError:
try:
ans=float(val)
return ans
except ValueError:
if val.lower()=="false":
return False
elif val.lower()=="true":
return True
else:
return val
def if_processing(self):
processing_file = os.path.join(self.results_dir, self.utc, "obs.processing")
return os.path.exists(processing_file)
def if_failed(self):
obs_failed_file = os.path.join(self.results_dir, self.utc, "obs.failed")
return os.path.exists(obs_failed_file)
def read_furby_params(self):
if self.is_failed:
self.inj_furbys = -1
return
if (self.info.MB_ENABLED or self.info.CORR_ENABLED):
self.inj_furbys = -1
else:
self.read_header()
try:
self.inj_furbys = self.header.INJECTED_FURBYS
except AttributeError as e:
self.inj_furbys = 0
else:
if self.inj_furbys > 0:
self.furby_beams = self.header.FURBY_BEAMS.strip(",")
self.furby_ids = self.header.FURBY_IDS.strip(",")
self.furby_tstamps = self.header.FURBY_TSTAMPS.strip(",")
def split_and_filter_furby_params(self):
if self.inj_furbys < 1:
raise ValueError("No furbies to split")
f_ids = N.array(self.furby_ids.split(","))
f_beams = N.array(self.furby_beams.split(","))
f_tstamps = N.array(self.furby_tstamps.split(","))
f_ids = f_ids[N.where(f_ids!='')]
f_beams = f_beams[N.where(f_beams!='')]
f_tstamps = f_tstamps[N.where(f_tstamps!='')]
test = N.array([len(f_ids), len(f_beams), len(f_tstamps)])
if N.any(test-self.inj_furbys):
raise ValueError("Incorrect number of furby params, observation should have failed")
self.furbies = []
self.dropped_furbies = []
for i in range(self.inj_furbys):
furby = Furby(f_ids[i], db = os.path.join(self.archives_dir, self.utc, "Furbys"))
furby.i_beam = int(f_beams[i])
furby.i_tstamp = float(f_tstamps[i])
furby.calc_times()
if (self.check_if_dropped(furby)):
self.dropped_furbies.append(furby)
else:
self.furbies.append(furby)
def check_if_dropped(self, furby):
if not hasattr(furby, 'header'):
furby.read_fheader()
if not hasattr(furby, 'length'):
furby.calc_times()
if furby.i_tstamp < furby.length/2:
return True
if (furby.i_tstamp - furby.length/2) > self.tobs:
return True
all_furby_tstamps = N.array([float(i.i_tstamp) for i in self.furbies])
diff = furby.i_tstamp - all_furby_tstamps
if N.any((diff < (furby.length + 512*self.tres)) & (diff > 0)):
return True
return False
class Furby(Furby_reader):
def __init__(self, ID, db = "/home/dada/furby_database"):
self.ID = ID
self.name = "furby_"+ID
self.DB = db
self.file = os.path.join(self.DB, self.name)
self.i_beam = None
self.i_tstamp = None
self.i_snr = None
def __repr__(self):
return str(self.ID)
def read_fheader(self):
self.read_header(self.file)
def calc_times(self):
log = L.getLogger("furby_manager")
if not hasattr(self, 'header'):
self.read_fheader()
chw = (self.header.FTOP - self.header.FBOTTOM) / self.header.NCHAN
f_chtop = self.header.FTOP - chw/2
f_chmid = f_chtop - (self.header.NCHAN/2 * chw)
f_chbottom = self.header.FBOTTOM + chw/2
delay_to_top = 4.14881 * 1e6 * self.header.DM * ( f_chtop**(-2) - f_chmid**(-2) ) *1e-3
delay_to_bottom = 4.14881 * 1e6 * self.header.DM * ( f_chbottom**(-2) - f_chmid**(-2) ) *1e-3
self.s_time = self.i_tstamp + delay_to_top
self.e_time = self.i_tstamp + delay_to_bottom
self.c_time = self.i_tstamp
self.length = self.header.NSAMPS * self.header.TSAMP * 1e-6
def list_UTCs_from(start_utc):
start = Observation(start_utc)
cmd = "ls -1d "+start.results_dir+"/202* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
utcs = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n")
if len(utcs) == 0:
raise Exception("Given start UTC ({}) not found in {}".format(start_utc, start.results_dir))
return utcs
def list_UTCs_until(utc):
check = Observation(utc)
start_utc = get_first_UTC()
UTCs_from_start = list_UTCs_from(start_utc)
end_utc = utc
    index = int(N.where(N.array(UTCs_from_start) == end_utc)[0][0])
UTCs_until = UTCs_from_start[:index+1]
return UTCs_until
def list_UTCs_after(utc):
    inclusive_utcs = list_UTCs_from(utc)
return inclusive_utcs[1:]
def get_latest_UTC():
cmd = "ls -1d -rt "+conf.results_dir+"/20* | tail -1 | awk -F/ '{print $5}'"
    utc = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip()
return utc
def get_first_UTC():
return "2017-10-31-08:49:32"
| true
| true
|
79071ee9fe7f49640ed19449e3f774d5649ae15f
| 1,040
|
py
|
Python
|
tests/test_client.py
|
huangwanquan/python-orion-client
|
33a430b47ac8cc311d852d838b1f1e1409b5b322
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
huangwanquan/python-orion-client
|
33a430b47ac8cc311d852d838b1f1e1409b5b322
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
huangwanquan/python-orion-client
|
33a430b47ac8cc311d852d838b1f1e1409b5b322
|
[
"Apache-2.0"
] | 1
|
2021-09-30T09:07:14.000Z
|
2021-09-30T09:07:14.000Z
|
#!/usr/bin/env python3
# Software Name: ngsildclient
# SPDX-FileCopyrightText: Copyright (c) 2021 Orange
# SPDX-License-Identifier: Apache 2.0
#
# This software is distributed under the Apache 2.0;
# see the NOTICE file for more details.
#
# Author: Fabien BATTELLO <fabien.battello@orange.com> et al.
# SPDX-License-Identifier: Apache-2.0
import logging
from ngsildclient.api.client import Client, Vendor
from .common import mocked_connected
logger = logging.getLogger(__name__)
def test_api_is_connected(requests_mock):
requests_mock.get("http://localhost:1026/ngsi-ld/v1/entities", status_code=200)
client = Client()
assert client.is_connected()
def test_api_guess_broker(mocked_connected, requests_mock):
requests_mock.get(
"http://localhost:1026/version",
status_code=200,
json={"orionld version": "post-v0.8.1"},
)
client = Client()
vendor, version = client.guess_vendor()
logger.info(f"{vendor=}")
assert vendor == Vendor.ORIONLD
assert version == "post-v0.8.1"
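# A further test in the same style (hypothetical, assuming is_connected()
# treats a non-2xx response as not connected):
def test_api_is_not_connected(requests_mock):
    requests_mock.get("http://localhost:1026/ngsi-ld/v1/entities", status_code=404)
    client = Client()
    assert not client.is_connected()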
| 28.108108
| 83
| 0.721154
|
import logging
from ngsildclient.api.client import Client, Vendor
from .common import mocked_connected
logger = logging.getLogger(__name__)
def test_api_is_connected(requests_mock):
requests_mock.get("http://localhost:1026/ngsi-ld/v1/entities", status_code=200)
client = Client()
assert client.is_connected()
def test_api_guess_broker(mocked_connected, requests_mock):
requests_mock.get(
"http://localhost:1026/version",
status_code=200,
json={"orionld version": "post-v0.8.1"},
)
client = Client()
vendor, version = client.guess_vendor()
logger.info(f"{vendor=}")
assert vendor == Vendor.ORIONLD
assert version == "post-v0.8.1"
| true
| true
|
79071f35cc4c3888455c1fe89db96efe7cbe3d8d
| 31,267
|
py
|
Python
|
xsd-fu/python/genshi/output.py
|
jburel/ome-model
|
4817c8dfcbe3bfbeafe899c489657769d7ebca60
|
[
"BSD-2-Clause"
] | 476
|
2015-01-07T08:59:53.000Z
|
2022-02-11T09:46:06.000Z
|
xsd-fu/python/genshi/output.py
|
jburel/ome-model
|
4817c8dfcbe3bfbeafe899c489657769d7ebca60
|
[
"BSD-2-Clause"
] | 82
|
2015-01-15T12:30:43.000Z
|
2022-01-06T02:56:53.000Z
|
xsd-fu/python/genshi/output.py
|
jburel/ome-model
|
4817c8dfcbe3bfbeafe899c489657769d7ebca60
|
[
"BSD-2-Clause"
] | 99
|
2015-01-14T19:53:45.000Z
|
2021-08-11T15:17:26.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
"""Encode serializer output into a string.
:param iterator: the iterator returned from serializing a stream (basically
any iterator that yields unicode objects)
:param method: the serialization method; determines how characters not
representable in the specified encoding are treated
:param encoding: how the output string should be encoded; if set to `None`,
this method returns a `unicode` object
:param out: a file-like object that the output should be written to
instead of being returned as one big string; note that if
this is a file or socket (or similar), the `encoding` must
not be `None` (that is, the output must be encoded)
:return: a `str` or `unicode` object (depending on the `encoding`
parameter), or `None` if the `out` parameter is provided
:since: version 0.4.1
:note: Changed in 0.5: added the `out` parameter
"""
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
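# Minimal usage sketch for `encode` (names from this module only):
#   xml_text = encode(XMLSerializer()(stream), encoding='utf-8')
# returns a UTF-8 encoded `str`; with encoding=None a `unicode` object
# is returned instead.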
def get_serializer(method='xml', **kwargs):
"""Return a serializer object for the given method.
:param method: the serialization method; can be either "xml", "xhtml",
"html", "text", or a custom serializer class
Any additional keyword arguments are passed to the serializer, and thus
depend on the `method` parameter value.
:see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
:since: version 0.4.1
"""
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
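# e.g. get_serializer('html', doctype='html5') is equivalent to
# HTMLSerializer(doctype='html5'); a serializer class passed as `method`
# also works, since it is simply called with **kwargs.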
def _prepare_cache(use_cache=True):
"""Prepare a private token serialization cache.
:param use_cache: boolean indicating whether a real cache should
be used or not. If not, the returned functions
are no-ops.
:return: emit and get functions, for storing and retrieving
serialized values from the cache.
"""
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
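# Contract sketch for the helpers above:
#   _emit, _get, cache = _prepare_cache()
#   _emit(TEXT, u'hi', u'hi')          # stores under the key (TEXT, u'hi')
#   assert _get((TEXT, u'hi')) == u'hi'
# With use_cache=False both become no-ops and _get always returns None.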
class DocType(object):
"""Defines a number of commonly used DOCTYPE declarations as constants."""
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
"""Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
declaration for the specified name.
The following names are recognized in this version:
* "html" or "html-strict" for the HTML 4.01 strict DTD
* "html-transitional" for the HTML 4.01 transitional DTD
* "html-frameset" for the HTML 4.01 frameset DTD
* "html5" for the ``DOCTYPE`` proposed for HTML5
* "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
* "xhtml-transitional" for the XHTML 1.0 transitional DTD
* "xhtml-frameset" for the XHTML 1.0 frameset DTD
* "xhtml11" for the XHTML 1.1 DTD
* "svg" or "svg-full" for the SVG 1.1 DTD
* "svg-basic" for the SVG Basic 1.1 DTD
* "svg-tiny" for the SVG Tiny 1.1 DTD
:param name: the name of the ``DOCTYPE``
:return: the ``(name, pubid, sysid)`` tuple for the requested
``DOCTYPE``, or ``None`` if the name is not recognized
:since: version 0.4.1
"""
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
'html-transitional': DocType.HTML_TRANSITIONAL,
'html-frameset': DocType.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
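# Usage sketch: DocType.get('html5') returns ('html', None, None),
# DocType.get('xhtml') returns the XHTML 1.0 strict triple, and unknown
# names yield None.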
class XMLSerializer(object):
"""Produces XML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XMLSerializer()(elem.generate())))
<div><a href="foo"/><br/><hr noshade="True"/></div>
"""
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
"""Initialize the XML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output, or the name of a DOCTYPE as
defined in `DocType.get`
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.4.2: The `doctype` parameter can now be a string.
:note: Changed in 0.6: The `cache` parameter was added
"""
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
"""Produces XHTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(XHTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br /><hr noshade="noshade" /></div>
"""
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
"""Produces HTML text from an event stream.
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
>>> print(''.join(HTMLSerializer()(elem.generate())))
<div><a href="foo"></a><br><hr noshade></div>
"""
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
"""Initialize the HTML serializer.
:param doctype: a ``(name, pubid, sysid)`` tuple that represents the
DOCTYPE declaration that should be included at the top
of the generated output
:param strip_whitespace: whether extraneous whitespace should be
stripped from the output
:param cache: whether to cache the text output per event, which
improves performance for repetitive markup
:note: Changed in 0.6: The `cache` parameter was added
"""
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
"""Produces plain text from an event stream.
    Only text events are included in the output. Unlike the other serializers,
special XML characters are not escaped:
>>> from genshi.builder import tag
>>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
>>> print(elem)
<div><a href="foo"><Hello!></a><br/></div>
>>> print(''.join(TextSerializer()(elem.generate())))
<Hello!>
If text events contain literal markup (instances of the `Markup` class),
that markup is by default passed through unchanged:
>>> elem = tag.div(Markup('<a href="foo">Hello & Bye!</a><br/>'))
>>> print(elem.generate().render(TextSerializer, encoding=None))
<a href="foo">Hello & Bye!</a><br/>
    You can use the ``strip_markup`` option to change this behavior, so that tags and
entities are stripped from the output (or in the case of entities,
replaced with the equivalent character):
>>> print(elem.generate().render(TextSerializer, strip_markup=True,
... encoding=None))
Hello & Bye!
"""
def __init__(self, strip_markup=False):
"""Create the serializer.
:param strip_markup: whether markup (tags and encoded characters) found
in the text should be removed
"""
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
"""Combines `START` and `STOP` events into `EMPTY` events for elements that
have no contents.
"""
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
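# Sketch of the collapsing performed above: the adjacent pair
#   (START, (u'br', Attrs()), pos), (END, u'br', pos)
# is emitted as the single event (EMPTY, (u'br', Attrs()), pos), which is
# what lets the serializers render <br/> without buffering the stream.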
class NamespaceFlattener(object):
r"""Output stream filter that removes namespace information from the stream,
instead adding namespace attributes and prefixes as needed.
:param prefixes: optional mapping of namespace URIs to prefixes
>>> from genshi.input import XML
>>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
... <two:item/>
... </doc>''')
>>> for kind, data, pos in NamespaceFlattener()(xml):
... print('%s %r' % (kind, data))
START (u'doc', Attrs([('xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
TEXT u'\n '
START (u'two:item', Attrs())
END u'two:item'
TEXT u'\n'
END u'doc'
"""
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``<style>`` or ``<script>`` in HTML
documents).
"""
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
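    # Effect sketch: outside a preserved element the text u'a \t\n\n\nb'
    # comes out as u'a\nb' (blanks before newlines trimmed, runs of blank
    # lines collapsed); under xml:space="preserve" it passes through intact.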
class DocTypeInserter(object):
"""A filter that inserts the DOCTYPE declaration in the correct location,
after the XML declaration.
"""
def __init__(self, doctype):
"""Initialize the filter.
:param doctype: DOCTYPE as a string or DocType object.
"""
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
| 37.134204
| 82
| 0.521604
|
from itertools import chain
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding=None, out=None):
if encoding is not None:
errors = 'replace'
if method != 'text' and not isinstance(method, TextSerializer):
errors = 'xmlcharrefreplace'
_encode = lambda string: string.encode(encoding, errors)
else:
_encode = lambda string: string
if out is None:
return _encode(''.join(list(iterator)))
for chunk in iterator:
out.write(_encode(chunk))
def get_serializer(method='xml', **kwargs):
if isinstance(method, basestring):
method = {'xml': XMLSerializer,
'xhtml': XHTMLSerializer,
'html': HTMLSerializer,
'text': TextSerializer}[method.lower()]
return method(**kwargs)
def _prepare_cache(use_cache=True):
cache = {}
if use_cache:
def _emit(kind, input, output):
cache[kind, input] = output
return output
_get = cache.get
else:
def _emit(kind, input, output):
return output
def _get(key):
pass
return _emit, _get, cache
class DocType(object):
HTML_STRICT = (
'html', '-//W3C//DTD HTML 4.01//EN',
'http://www.w3.org/TR/html4/strict.dtd'
)
HTML_TRANSITIONAL = (
'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
'http://www.w3.org/TR/html4/loose.dtd'
)
HTML_FRAMESET = (
'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
'http://www.w3.org/TR/html4/frameset.dtd'
)
HTML = HTML_STRICT
HTML5 = ('html', None, None)
XHTML_STRICT = (
'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
)
XHTML_TRANSITIONAL = (
'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
)
XHTML_FRAMESET = (
'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
)
XHTML = XHTML_STRICT
XHTML11 = (
'html', '-//W3C//DTD XHTML 1.1//EN',
'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
)
SVG_FULL = (
'svg', '-//W3C//DTD SVG 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
)
SVG_BASIC = (
'svg', '-//W3C//DTD SVG Basic 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
)
SVG_TINY = (
'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
)
SVG = SVG_FULL
@classmethod
def get(cls, name):
return {
'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
'html-transitional': DocType.HTML_TRANSITIONAL,
'html-frameset': DocType.HTML_FRAMESET,
'html5': cls.HTML5,
'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
'xhtml-transitional': cls.XHTML_TRANSITIONAL,
'xhtml-frameset': cls.XHTML_FRAMESET,
'xhtml11': cls.XHTML11,
'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
'svg-basic': cls.SVG_BASIC,
'svg-tiny': cls.SVG_TINY
}.get(name.lower())
class XMLSerializer(object):
_PRESERVE_SPACE = frozenset()
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, cache=True):
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.cache = cache
def _prepare_cache(self):
return _prepare_cache(self.cache)[:2]
def __call__(self, stream):
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
buf += [' ', attr, '="', escape(value), '"']
buf.append(kind is EMPTY and '/>' or '>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is XML_DECL and not have_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class XHTMLSerializer(XMLSerializer):
_EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
'hr', 'img', 'input', 'isindex', 'link', 'meta',
'param'])
_BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
'defer', 'disabled', 'ismap', 'multiple',
'nohref', 'noresize', 'noshade', 'nowrap'])
_PRESERVE_SPACE = frozenset([
QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
])
def __init__(self, doctype=None, strip_whitespace=True,
namespace_prefixes=None, drop_xml_decl=True, cache=True):
super(XHTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
namespace_prefixes = namespace_prefixes or {}
namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes,
cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
self.drop_xml_decl = drop_xml_decl
self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
drop_xml_decl = self.drop_xml_decl
have_decl = have_doctype = False
in_cdata = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
cached = _get((kind, data))
if cached is not None:
yield cached
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
value = attr
elif attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr == 'xml:space':
continue
buf += [' ', attr, '="', escape(value), '"']
if kind is EMPTY:
if tag in empty_elems:
buf.append(' />')
else:
buf.append('></%s>' % tag)
else:
buf.append('>')
yield _emit(kind, data, Markup(''.join(buf)))
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
elif kind is TEXT:
if in_cdata:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is XML_DECL and not have_decl and not drop_xml_decl:
version, encoding, standalone = data
buf = ['<?xml version="%s"' % version]
if encoding:
buf.append(' encoding="%s"' % encoding)
if standalone != -1:
standalone = standalone and 'yes' or 'no'
buf.append(' standalone="%s"' % standalone)
buf.append('?>\n')
yield Markup(''.join(buf))
have_decl = True
elif kind is START_CDATA:
yield Markup('<![CDATA[')
in_cdata = True
elif kind is END_CDATA:
yield Markup(']]>')
in_cdata = False
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class HTMLSerializer(XHTMLSerializer):
_NOESCAPE_ELEMS = frozenset([
QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
QName('style'), QName('http://www.w3.org/1999/xhtml}style')
])
def __init__(self, doctype=None, strip_whitespace=True, cache=True):
super(HTMLSerializer, self).__init__(doctype, False)
self.filters = [EmptyTagFilter()]
if strip_whitespace:
self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
self._NOESCAPE_ELEMS))
self.filters.append(NamespaceFlattener(prefixes={
'http://www.w3.org/1999/xhtml': ''
}, cache=cache))
if doctype:
self.filters.append(DocTypeInserter(doctype))
        self.cache = cache
def __call__(self, stream):
boolean_attrs = self._BOOLEAN_ATTRS
empty_elems = self._EMPTY_ELEMS
noescape_elems = self._NOESCAPE_ELEMS
have_doctype = False
noescape = False
_emit, _get = self._prepare_cache()
for filter_ in self.filters:
stream = filter_(stream)
for kind, data, _ in stream:
if kind is TEXT and isinstance(data, Markup):
yield data
continue
output = _get((kind, data))
if output is not None:
yield output
if (kind is START or kind is EMPTY) \
and data[0] in noescape_elems:
noescape = True
elif kind is END:
noescape = False
elif kind is START or kind is EMPTY:
tag, attrib = data
buf = ['<', tag]
for attr, value in attrib:
if attr in boolean_attrs:
if value:
buf += [' ', attr]
elif ':' in attr:
if attr == 'xml:lang' and 'lang' not in attrib:
buf += [' lang="', escape(value), '"']
elif attr != 'xmlns':
buf += [' ', attr, '="', escape(value), '"']
buf.append('>')
if kind is EMPTY:
if tag not in empty_elems:
buf.append('</%s>' % tag)
yield _emit(kind, data, Markup(''.join(buf)))
if tag in noescape_elems:
noescape = True
elif kind is END:
yield _emit(kind, data, Markup('</%s>' % data))
noescape = False
elif kind is TEXT:
if noescape:
yield _emit(kind, data, data)
else:
yield _emit(kind, data, escape(data, quotes=False))
elif kind is COMMENT:
yield _emit(kind, data, Markup('<!--%s-->' % data))
elif kind is DOCTYPE and not have_doctype:
name, pubid, sysid = data
buf = ['<!DOCTYPE %s']
if pubid:
buf.append(' PUBLIC "%s"')
elif sysid:
buf.append(' SYSTEM')
if sysid:
buf.append(' "%s"')
buf.append('>\n')
yield Markup(''.join(buf)) % tuple([p for p in data if p])
have_doctype = True
elif kind is PI:
yield _emit(kind, data, Markup('<?%s %s?>' % data))
class TextSerializer(object):
def __init__(self, strip_markup=False):
self.strip_markup = strip_markup
def __call__(self, stream):
strip_markup = self.strip_markup
for event in stream:
if event[0] is TEXT:
data = event[1]
if strip_markup and type(data) is Markup:
data = data.striptags().stripentities()
yield unicode(data)
class EmptyTagFilter(object):
EMPTY = StreamEventKind('EMPTY')
def __call__(self, stream):
prev = (None, None, None)
for ev in stream:
if prev[0] is START:
if ev[0] is END:
prev = EMPTY, prev[1], prev[2]
yield prev
continue
else:
yield prev
if ev[0] is not START:
yield ev
prev = ev
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
def __init__(self, prefixes=None, cache=True):
self.prefixes = {XML_NAMESPACE.uri: 'xml'}
if prefixes is not None:
self.prefixes.update(prefixes)
self.cache = cache
def __call__(self, stream):
prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
namespaces = {XML_NAMESPACE.uri: ['xml']}
_emit, _get, cache = _prepare_cache(self.cache)
def _push_ns(prefix, uri):
namespaces.setdefault(uri, []).append(prefix)
prefixes.setdefault(prefix, []).append(uri)
cache.clear()
def _pop_ns(prefix):
uris = prefixes.get(prefix)
uri = uris.pop()
if not uris:
del prefixes[prefix]
if uri not in uris or uri != uris[-1]:
uri_prefixes = namespaces[uri]
uri_prefixes.pop()
if not uri_prefixes:
del namespaces[uri]
cache.clear()
return uri
ns_attrs = []
_push_ns_attr = ns_attrs.append
def _make_ns_attr(prefix, uri):
return 'xmlns%s' % (prefix and ':%s' % prefix or ''), uri
def _gen_prefix():
val = 0
while 1:
val += 1
yield 'ns%d' % val
_gen_prefix = _gen_prefix().next
for kind, data, pos in stream:
if kind is TEXT and isinstance(data, Markup):
yield kind, data, pos
continue
output = _get((kind, data))
if output is not None:
yield kind, output, pos
elif kind is START or kind is EMPTY:
tag, attrs = data
tagname = tag.localname
tagns = tag.namespace
if tagns:
if tagns in namespaces:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
else:
_push_ns_attr(('xmlns', tagns))
_push_ns('', tagns)
new_attrs = []
for attr, value in attrs:
attrname = attr.localname
attrns = attr.namespace
if attrns:
if attrns not in namespaces:
prefix = _gen_prefix()
_push_ns(prefix, attrns)
_push_ns_attr(('xmlns:%s' % prefix, attrns))
else:
prefix = namespaces[attrns][-1]
if prefix:
attrname = '%s:%s' % (prefix, attrname)
new_attrs.append((attrname, value))
data = _emit(kind, data, (tagname, Attrs(ns_attrs + new_attrs)))
yield kind, data, pos
del ns_attrs[:]
elif kind is END:
tagname = data.localname
tagns = data.namespace
if tagns:
prefix = namespaces[tagns][-1]
if prefix:
tagname = '%s:%s' % (prefix, tagname)
yield kind, _emit(kind, data, tagname), pos
elif kind is START_NS:
prefix, uri = data
if uri not in namespaces:
prefix = prefixes.get(uri, [prefix])[-1]
_push_ns_attr(_make_ns_attr(prefix, uri))
_push_ns(prefix, uri)
elif kind is END_NS:
if data in prefixes:
uri = _pop_ns(data)
if ns_attrs:
attr = _make_ns_attr(data, uri)
if attr in ns_attrs:
ns_attrs.remove(attr)
else:
yield kind, data, pos
class WhitespaceFilter(object):
def __init__(self, preserve=None, noescape=None):
if preserve is None:
preserve = []
self.preserve = frozenset(preserve)
if noescape is None:
noescape = []
self.noescape = frozenset(noescape)
def __call__(self, stream, ctxt=None, space=XML_NAMESPACE['space'],
trim_trailing_space=re.compile('[ \t]+(?=\n)').sub,
collapse_lines=re.compile('\n{2,}').sub):
mjoin = Markup('').join
preserve_elems = self.preserve
preserve = 0
noescape_elems = self.noescape
noescape = False
textbuf = []
push_text = textbuf.append
pop_text = textbuf.pop
for kind, data, pos in chain(stream, [(None, None, None)]):
if kind is TEXT:
if noescape:
data = Markup(data)
push_text(data)
else:
if textbuf:
if len(textbuf) > 1:
text = mjoin(textbuf, escape_quotes=False)
del textbuf[:]
else:
text = escape(pop_text(), quotes=False)
if not preserve:
text = collapse_lines('\n', trim_trailing_space('', text))
yield TEXT, Markup(text), pos
if kind is START:
tag, attrs = data
if preserve or (tag in preserve_elems or
attrs.get(space) == 'preserve'):
preserve += 1
if not noescape and tag in noescape_elems:
noescape = True
elif kind is END:
noescape = False
if preserve:
preserve -= 1
elif kind is START_CDATA:
noescape = True
elif kind is END_CDATA:
noescape = False
if kind:
yield kind, data, pos
class DocTypeInserter(object):
def __init__(self, doctype):
if isinstance(doctype, basestring):
doctype = DocType.get(doctype)
self.doctype_event = (DOCTYPE, doctype, (None, -1, -1))
def __call__(self, stream):
doctype_inserted = False
for kind, data, pos in stream:
if not doctype_inserted:
doctype_inserted = True
if kind is XML_DECL:
yield (kind, data, pos)
yield self.doctype_event
continue
yield self.doctype_event
yield (kind, data, pos)
if not doctype_inserted:
yield self.doctype_event
| true
| true
|
79071fba79fcbe4f8abb339625905d2d0f62c917
| 14,770
|
py
|
Python
|
models/model.py
|
DagothHertil/NNVEP-SRN-Deblur
|
c092fec78dfe73ce6247a56f1e16ab4f4576d6b0
|
[
"MIT"
] | null | null | null |
models/model.py
|
DagothHertil/NNVEP-SRN-Deblur
|
c092fec78dfe73ce6247a56f1e16ab4f4576d6b0
|
[
"MIT"
] | null | null | null |
models/model.py
|
DagothHertil/NNVEP-SRN-Deblur
|
c092fec78dfe73ce6247a56f1e16ab4f4576d6b0
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import time
import random
import datetime
import scipy.misc
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from datetime import datetime
from util.util import *
from util.BasicConvLSTMCell import *
class DEBLUR(object):
def __init__(self, args):
self.args = args
self.n_levels = 3
self.scale = 0.5
self.chns = 3 if self.args.model == 'color' else 1 # input / output channels
# if args.phase == 'train':
self.crop_size = 256
self.data_list = open(args.datalist, 'rt').read().splitlines()
self.data_list = list(map(lambda x: x.split(' '), self.data_list))
random.shuffle(self.data_list)
self.train_dir = os.path.join('./checkpoints', args.model)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
self.batch_size = args.batch_size
self.epoch = args.epoch
self.data_size = (len(self.data_list)) // self.batch_size
self.max_steps = int(self.epoch * self.data_size)
self.learning_rate = args.learning_rate
def input_producer(self, batch_size=10):
def read_data():
img_a = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[0]])),
channels=3)
img_b = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[1]])),
channels=3)
img_a, img_b = preprocessing([img_a, img_b])
return img_a, img_b
def preprocessing(imgs):
imgs = [tf.cast(img, tf.float32) / 255.0 for img in imgs]
if self.args.model != 'color':
imgs = [tf.image.rgb_to_grayscale(img) for img in imgs]
img_crop = tf.unstack(tf.random_crop(tf.stack(imgs, axis=0), [2, self.crop_size, self.crop_size, self.chns]),
axis=0)
return img_crop
with tf.variable_scope('input'):
List_all = tf.convert_to_tensor(self.data_list, dtype=tf.string)
gt_list = List_all[:, 0]
in_list = List_all[:, 1]
self.data_queue = tf.train.slice_input_producer([in_list, gt_list], capacity=20)
image_in, image_gt = read_data()
batch_in, batch_gt = tf.train.batch([image_in, image_gt], batch_size=batch_size, num_threads=8, capacity=20)
return batch_in, batch_gt
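        # Shape sketch: with the defaults above each call yields two tensors
        # of shape [batch_size, 256, 256, self.chns] -- the blurry input
        # batch and the matching sharp ground-truth batch.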
def generator(self, inputs, reuse=False, scope='g_net'):
n, h, w, c = inputs.get_shape().as_list()
if self.args.model == 'lstm':
with tf.variable_scope('LSTM'):
cell = BasicConvLSTMCell([h / 4, w / 4], [3, 3], 128)
rnn_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
x_unwrap = []
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
activation_fn=tf.nn.relu, padding='SAME', normalizer_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inp_pred = inputs
for i in xrange(self.n_levels):
scale = self.scale ** (self.n_levels - i - 1)
hi = int(round(h * scale))
wi = int(round(w * scale))
inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)
inp_pred = tf.stop_gradient(tf.image.resize_images(inp_pred, [hi, wi], method=0))
inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')
if self.args.model == 'lstm':
rnn_state = tf.image.resize_images(rnn_state, [hi // 4, wi // 4], method=0)
# encoder
conv1_1 = slim.conv2d(inp_all, 32, [5, 5], scope='enc1_1')
conv1_2 = ResnetBlock(conv1_1, 32, 5, scope='enc1_2')
conv1_3 = ResnetBlock(conv1_2, 32, 5, scope='enc1_3')
conv1_4 = ResnetBlock(conv1_3, 32, 5, scope='enc1_4')
conv2_1 = slim.conv2d(conv1_4, 64, [5, 5], stride=2, scope='enc2_1')
conv2_2 = ResnetBlock(conv2_1, 64, 5, scope='enc2_2')
conv2_3 = ResnetBlock(conv2_2, 64, 5, scope='enc2_3')
conv2_4 = ResnetBlock(conv2_3, 64, 5, scope='enc2_4')
conv3_1 = slim.conv2d(conv2_4, 128, [5, 5], stride=2, scope='enc3_1')
conv3_2 = ResnetBlock(conv3_1, 128, 5, scope='enc3_2')
conv3_3 = ResnetBlock(conv3_2, 128, 5, scope='enc3_3')
conv3_4 = ResnetBlock(conv3_3, 128, 5, scope='enc3_4')
if self.args.model == 'lstm':
deconv3_4, rnn_state = cell(conv3_4, rnn_state)
else:
deconv3_4 = conv3_4
# decoder
deconv3_3 = ResnetBlock(deconv3_4, 128, 5, scope='dec3_3')
deconv3_2 = ResnetBlock(deconv3_3, 128, 5, scope='dec3_2')
deconv3_1 = ResnetBlock(deconv3_2, 128, 5, scope='dec3_1')
deconv2_4 = slim.conv2d_transpose(deconv3_1, 64, [4, 4], stride=2, scope='dec2_4')
cat2 = deconv2_4 + conv2_4
deconv2_3 = ResnetBlock(cat2, 64, 5, scope='dec2_3')
deconv2_2 = ResnetBlock(deconv2_3, 64, 5, scope='dec2_2')
deconv2_1 = ResnetBlock(deconv2_2, 64, 5, scope='dec2_1')
deconv1_4 = slim.conv2d_transpose(deconv2_1, 32, [4, 4], stride=2, scope='dec1_4')
cat1 = deconv1_4 + conv1_4
deconv1_3 = ResnetBlock(cat1, 32, 5, scope='dec1_3')
deconv1_2 = ResnetBlock(deconv1_3, 32, 5, scope='dec1_2')
deconv1_1 = ResnetBlock(deconv1_2, 32, 5, scope='dec1_1')
inp_pred = slim.conv2d(deconv1_1, self.chns, [5, 5], activation_fn=None, scope='dec1_0')
if i >= 0:
x_unwrap.append(inp_pred)
if i == 0:
tf.get_variable_scope().reuse_variables()
return x_unwrap
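        # Scale schedule sketch: with n_levels = 3, scale = 0.5 and a 256x256
        # input, the loop above runs coarse-to-fine at 64x64, 128x128 and
        # 256x256, feeding each level the upsampled prediction of the previous
        # one (and, for the 'lstm' model, the resized recurrent state).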
def build_model(self):
img_in, img_gt = self.input_producer(self.batch_size)
tf.summary.image('img_in', im2uint8(img_in))
tf.summary.image('img_gt', im2uint8(img_gt))
print('img_in, img_gt', img_in.get_shape(), img_gt.get_shape())
# generator
x_unwrap = self.generator(img_in, reuse=False, scope='g_net')
# calculate multi-scale loss
self.loss_total = 0
for i in xrange(self.n_levels):
_, hi, wi, _ = x_unwrap[i].get_shape().as_list()
gt_i = tf.image.resize_images(img_gt, [hi, wi], method=0)
loss = tf.reduce_mean((gt_i - x_unwrap[i]) ** 2)
self.loss_total += loss
tf.summary.image('out_' + str(i), im2uint8(x_unwrap[i]))
tf.summary.scalar('loss_' + str(i), loss)
# losses
tf.summary.scalar('loss_total', self.loss_total)
# training vars
all_vars = tf.trainable_variables()
self.all_vars = all_vars
self.g_vars = [var for var in all_vars if 'g_net' in var.name]
self.lstm_vars = [var for var in all_vars if 'LSTM' in var.name]
for var in all_vars:
print(var.name)
def train(self):
def get_optimizer(loss, global_step=None, var_list=None, is_gradient_clip=False):
train_op = tf.train.AdamOptimizer(self.lr)
if is_gradient_clip:
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = train_op.minimize(loss, global_step, var_list)
return train_op
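        # Note on the clipping branch in get_optimizer: only the LSTM
        # gradients are clipped (to global norm 3); all others pass unchanged.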
global_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)
self.global_step = global_step
# build model
self.build_model()
# learning rate decay
self.lr = tf.train.polynomial_decay(self.learning_rate, global_step, self.max_steps, end_learning_rate=0.0,
power=0.3)
tf.summary.scalar('learning_rate', self.lr)
# training operators
train_gnet = get_optimizer(self.loss_total, global_step, self.all_vars)
# session and thread
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# training summary
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in xrange(sess.run(global_step), self.max_steps + 1):
start_time = time.time()
# update G network
_, loss_total_val = sess.run([train_gnet, self.loss_total])
duration = time.time() - start_time
# print loss_value
assert not np.isnan(loss_total_val), 'Model diverged with loss = NaN'
if step % 5 == 0:
num_examples_per_step = self.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = (%.5f; %.5f, %.5f)(%.1f data/s; %.3f s/bch)')
print(format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_total_val, 0.0,
0.0, examples_per_sec, sec_per_batch))
if step % 20 == 0:
# summary_str = sess.run(summary_op, feed_dict={inputs:batch_input, gt:batch_gt})
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, global_step=step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or step == self.max_steps:
checkpoint_path = os.path.join(self.train_dir, 'checkpoints')
self.save(sess, checkpoint_path, step)
def save(self, sess, checkpoint_dir, step):
model_name = "deblur.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, sess, checkpoint_dir, step=None):
print(" [*] Reading checkpoints...")
model_name = "deblur.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if step is not None:
ckpt_name = model_name + '-' + str(step)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading intermediate checkpoints... Success")
return str(step)
elif ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
ckpt_iter = ckpt_name.split('-')[1]
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading updated checkpoints... Success")
return ckpt_iter
else:
print(" [*] Reading checkpoints... ERROR")
return False
def test(self, height, width, input_path, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
imgsName = sorted(os.listdir(input_path))
H, W = height, width
inp_chns = 3 if self.args.model == 'color' else 1
self.batch_size = 1 if self.args.model == 'color' else 3
inputs = tf.placeholder(shape=[self.batch_size, H, W, inp_chns], dtype=tf.float32)
outputs = self.generator(inputs, reuse=False)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
self.saver = tf.train.Saver()
self.load(sess, self.train_dir, step=523000)
for imgName in imgsName:
blur = scipy.misc.imread(os.path.join(input_path, imgName))
h, w, c = blur.shape
# make sure the width is larger than the height
rot = False
if h > w:
blur = np.transpose(blur, [1, 0, 2])
rot = True
h = int(blur.shape[0])
w = int(blur.shape[1])
resize = False
if h > H or w > W:
scale = min(1.0 * H / h, 1.0 * W / w)
new_h = int(h * scale)
new_w = int(w * scale)
blur = scipy.misc.imresize(blur, [new_h, new_w], 'bicubic')
resize = True
blurPad = np.pad(blur, ((0, H - new_h), (0, W - new_w), (0, 0)), 'edge')
else:
blurPad = np.pad(blur, ((0, H - h), (0, W - w), (0, 0)), 'edge')
blurPad = np.expand_dims(blurPad, 0)
if self.args.model != 'color':
blurPad = np.transpose(blurPad, (3, 1, 2, 0))
start = time.time()
deblur = sess.run(outputs, feed_dict={inputs: blurPad / 255.0})
duration = time.time() - start
print('Saving results: %s ... %4.3fs' % (os.path.join(output_path, imgName), duration))
res = deblur[-1]
if self.args.model != 'color':
res = np.transpose(res, (3, 1, 2, 0))
res = im2uint8(res[0, :, :, :])
# crop the image into original size
if resize:
res = res[:new_h, :new_w, :]
res = scipy.misc.imresize(res, [h, w], 'bicubic')
else:
res = res[:h, :w, :]
if rot:
res = np.transpose(res, [1, 0, 2])
scipy.misc.imsave(os.path.join(output_path, imgName), res)
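# --- Usage sketch (not part of the original file) --------------------------
# Hedged illustration of how DEBLUR is driven: __init__ reads args.model,
# args.datalist, args.batch_size, args.epoch and args.learning_rate, so a
# minimal argparse namespace like the one below should suffice. The flag
# names are assumptions inferred from those attribute accesses, not a
# confirmed CLI for this repository.
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--model', default='lstm')            # 'color' or 'lstm'
#   parser.add_argument('--datalist', default='datalist.txt') # 'gt in' pairs per line
#   parser.add_argument('--batch_size', type=int, default=16)
#   parser.add_argument('--epoch', type=int, default=4000)
#   parser.add_argument('--learning_rate', type=float, default=1e-4)
#   model = DEBLUR(parser.parse_args([]))
#   model.train()                     # or: model.test(720, 1280, 'in', 'out')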
| 46.30094
| 121
| 0.565471
|
from __future__ import print_function
import os
import time
import random
import datetime
import scipy.misc
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from datetime import datetime
from util.util import *
from util.BasicConvLSTMCell import *
class DEBLUR(object):
def __init__(self, args):
self.args = args
self.n_levels = 3
self.scale = 0.5
self.chns = 3 if self.args.model == 'color' else 1
self.crop_size = 256
self.data_list = open(args.datalist, 'rt').read().splitlines()
self.data_list = list(map(lambda x: x.split(' '), self.data_list))
random.shuffle(self.data_list)
self.train_dir = os.path.join('./checkpoints', args.model)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
self.batch_size = args.batch_size
self.epoch = args.epoch
self.data_size = (len(self.data_list)) // self.batch_size
self.max_steps = int(self.epoch * self.data_size)
self.learning_rate = args.learning_rate
def input_producer(self, batch_size=10):
def read_data():
img_a = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[0]])),
channels=3)
img_b = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[1]])),
channels=3)
img_a, img_b = preprocessing([img_a, img_b])
return img_a, img_b
def preprocessing(imgs):
imgs = [tf.cast(img, tf.float32) / 255.0 for img in imgs]
if self.args.model != 'color':
imgs = [tf.image.rgb_to_grayscale(img) for img in imgs]
img_crop = tf.unstack(tf.random_crop(tf.stack(imgs, axis=0), [2, self.crop_size, self.crop_size, self.chns]),
axis=0)
return img_crop
with tf.variable_scope('input'):
List_all = tf.convert_to_tensor(self.data_list, dtype=tf.string)
gt_list = List_all[:, 0]
in_list = List_all[:, 1]
self.data_queue = tf.train.slice_input_producer([in_list, gt_list], capacity=20)
image_in, image_gt = read_data()
batch_in, batch_gt = tf.train.batch([image_in, image_gt], batch_size=batch_size, num_threads=8, capacity=20)
return batch_in, batch_gt
def generator(self, inputs, reuse=False, scope='g_net'):
n, h, w, c = inputs.get_shape().as_list()
if self.args.model == 'lstm':
with tf.variable_scope('LSTM'):
cell = BasicConvLSTMCell([h / 4, w / 4], [3, 3], 128)
rnn_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
x_unwrap = []
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
activation_fn=tf.nn.relu, padding='SAME', normalizer_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inp_pred = inputs
for i in xrange(self.n_levels):
scale = self.scale ** (self.n_levels - i - 1)
hi = int(round(h * scale))
wi = int(round(w * scale))
inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)
inp_pred = tf.stop_gradient(tf.image.resize_images(inp_pred, [hi, wi], method=0))
inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')
if self.args.model == 'lstm':
rnn_state = tf.image.resize_images(rnn_state, [hi // 4, wi // 4], method=0)
conv1_1 = slim.conv2d(inp_all, 32, [5, 5], scope='enc1_1')
conv1_2 = ResnetBlock(conv1_1, 32, 5, scope='enc1_2')
conv1_3 = ResnetBlock(conv1_2, 32, 5, scope='enc1_3')
conv1_4 = ResnetBlock(conv1_3, 32, 5, scope='enc1_4')
conv2_1 = slim.conv2d(conv1_4, 64, [5, 5], stride=2, scope='enc2_1')
conv2_2 = ResnetBlock(conv2_1, 64, 5, scope='enc2_2')
conv2_3 = ResnetBlock(conv2_2, 64, 5, scope='enc2_3')
conv2_4 = ResnetBlock(conv2_3, 64, 5, scope='enc2_4')
conv3_1 = slim.conv2d(conv2_4, 128, [5, 5], stride=2, scope='enc3_1')
conv3_2 = ResnetBlock(conv3_1, 128, 5, scope='enc3_2')
conv3_3 = ResnetBlock(conv3_2, 128, 5, scope='enc3_3')
conv3_4 = ResnetBlock(conv3_3, 128, 5, scope='enc3_4')
if self.args.model == 'lstm':
deconv3_4, rnn_state = cell(conv3_4, rnn_state)
else:
deconv3_4 = conv3_4
deconv3_3 = ResnetBlock(deconv3_4, 128, 5, scope='dec3_3')
deconv3_2 = ResnetBlock(deconv3_3, 128, 5, scope='dec3_2')
deconv3_1 = ResnetBlock(deconv3_2, 128, 5, scope='dec3_1')
deconv2_4 = slim.conv2d_transpose(deconv3_1, 64, [4, 4], stride=2, scope='dec2_4')
cat2 = deconv2_4 + conv2_4
deconv2_3 = ResnetBlock(cat2, 64, 5, scope='dec2_3')
deconv2_2 = ResnetBlock(deconv2_3, 64, 5, scope='dec2_2')
deconv2_1 = ResnetBlock(deconv2_2, 64, 5, scope='dec2_1')
deconv1_4 = slim.conv2d_transpose(deconv2_1, 32, [4, 4], stride=2, scope='dec1_4')
cat1 = deconv1_4 + conv1_4
deconv1_3 = ResnetBlock(cat1, 32, 5, scope='dec1_3')
deconv1_2 = ResnetBlock(deconv1_3, 32, 5, scope='dec1_2')
deconv1_1 = ResnetBlock(deconv1_2, 32, 5, scope='dec1_1')
inp_pred = slim.conv2d(deconv1_1, self.chns, [5, 5], activation_fn=None, scope='dec1_0')
if i >= 0:
x_unwrap.append(inp_pred)
if i == 0:
tf.get_variable_scope().reuse_variables()
return x_unwrap
def build_model(self):
img_in, img_gt = self.input_producer(self.batch_size)
tf.summary.image('img_in', im2uint8(img_in))
tf.summary.image('img_gt', im2uint8(img_gt))
print('img_in, img_gt', img_in.get_shape(), img_gt.get_shape())
x_unwrap = self.generator(img_in, reuse=False, scope='g_net')
self.loss_total = 0
for i in xrange(self.n_levels):
_, hi, wi, _ = x_unwrap[i].get_shape().as_list()
gt_i = tf.image.resize_images(img_gt, [hi, wi], method=0)
loss = tf.reduce_mean((gt_i - x_unwrap[i]) ** 2)
self.loss_total += loss
tf.summary.image('out_' + str(i), im2uint8(x_unwrap[i]))
tf.summary.scalar('loss_' + str(i), loss)
tf.summary.scalar('loss_total', self.loss_total)
all_vars = tf.trainable_variables()
self.all_vars = all_vars
self.g_vars = [var for var in all_vars if 'g_net' in var.name]
self.lstm_vars = [var for var in all_vars if 'LSTM' in var.name]
for var in all_vars:
print(var.name)
def train(self):
def get_optimizer(loss, global_step=None, var_list=None, is_gradient_clip=False):
train_op = tf.train.AdamOptimizer(self.lr)
if is_gradient_clip:
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if 'LSTM' not in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = train_op.minimize(loss, global_step, var_list)
return train_op
global_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)
self.global_step = global_step
self.build_model()
self.lr = tf.train.polynomial_decay(self.learning_rate, global_step, self.max_steps, end_learning_rate=0.0,
power=0.3)
tf.summary.scalar('learning_rate', self.lr)
train_gnet = get_optimizer(self.loss_total, global_step, self.all_vars)
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in xrange(sess.run(global_step), self.max_steps + 1):
start_time = time.time()
_, loss_total_val = sess.run([train_gnet, self.loss_total])
duration = time.time() - start_time
assert not np.isnan(loss_total_val), 'Model diverged with loss = NaN'
if step % 5 == 0:
num_examples_per_step = self.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = (%.5f; %.5f, %.5f)(%.1f data/s; %.3f s/bch)')
print(format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_total_val, 0.0,
0.0, examples_per_sec, sec_per_batch))
if step % 20 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, global_step=step)
if step % 1000 == 0 or step == self.max_steps:
checkpoint_path = os.path.join(self.train_dir, 'checkpoints')
self.save(sess, checkpoint_path, step)
def save(self, sess, checkpoint_dir, step):
model_name = "deblur.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, sess, checkpoint_dir, step=None):
print(" [*] Reading checkpoints...")
model_name = "deblur.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if step is not None:
ckpt_name = model_name + '-' + str(step)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading intermediate checkpoints... Success")
return str(step)
elif ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
ckpt_iter = ckpt_name.split('-')[1]
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading updated checkpoints... Success")
return ckpt_iter
else:
print(" [*] Reading checkpoints... ERROR")
return False
def test(self, height, width, input_path, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
imgsName = sorted(os.listdir(input_path))
H, W = height, width
inp_chns = 3 if self.args.model == 'color' else 1
self.batch_size = 1 if self.args.model == 'color' else 3
inputs = tf.placeholder(shape=[self.batch_size, H, W, inp_chns], dtype=tf.float32)
outputs = self.generator(inputs, reuse=False)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
self.saver = tf.train.Saver()
self.load(sess, self.train_dir, step=523000)
for imgName in imgsName:
blur = scipy.misc.imread(os.path.join(input_path, imgName))
h, w, c = blur.shape
rot = False
if h > w:
blur = np.transpose(blur, [1, 0, 2])
rot = True
h = int(blur.shape[0])
w = int(blur.shape[1])
resize = False
if h > H or w > W:
scale = min(1.0 * H / h, 1.0 * W / w)
new_h = int(h * scale)
new_w = int(w * scale)
blur = scipy.misc.imresize(blur, [new_h, new_w], 'bicubic')
resize = True
blurPad = np.pad(blur, ((0, H - new_h), (0, W - new_w), (0, 0)), 'edge')
else:
blurPad = np.pad(blur, ((0, H - h), (0, W - w), (0, 0)), 'edge')
blurPad = np.expand_dims(blurPad, 0)
if self.args.model != 'color':
blurPad = np.transpose(blurPad, (3, 1, 2, 0))
start = time.time()
deblur = sess.run(outputs, feed_dict={inputs: blurPad / 255.0})
duration = time.time() - start
print('Saving results: %s ... %4.3fs' % (os.path.join(output_path, imgName), duration))
res = deblur[-1]
if self.args.model != 'color':
res = np.transpose(res, (3, 1, 2, 0))
res = im2uint8(res[0, :, :, :])
if resize:
res = res[:new_h, :new_w, :]
res = scipy.misc.imresize(res, [h, w], 'bicubic')
else:
res = res[:h, :w, :]
if rot:
res = np.transpose(res, [1, 0, 2])
scipy.misc.imsave(os.path.join(output_path, imgName), res)
| true
| true
|
790720cba5e6becaf5be0336c2f2ab24b0d0d12e
| 21,274
|
py
|
Python
|
qiskit/visualization/gate_map.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | null | null | null |
qiskit/visualization/gate_map.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | 12
|
2018-09-21T12:02:18.000Z
|
2018-09-25T09:14:59.000Z
|
qiskit/visualization/gate_map.py
|
navaneethsdk/qiskit-terra
|
66a029f2a67c14dbf34857d172b088d75d152b55
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for visualizing device coupling maps"""
import math
import numpy as np
from qiskit.exceptions import QiskitError
from .matplotlib import HAS_MATPLOTLIB
from .exceptions import VisualizationError
class _GraphDist():
"""Transform the circles properly for non-square axes.
"""
def __init__(self, size, ax, x=True):
self.size = size
self.ax = ax # pylint: disable=invalid-name
self.x = x
@property
def dist_real(self):
"""Compute distance.
"""
x0, y0 = self.ax.transAxes.transform( # pylint: disable=invalid-name
(0, 0))
x1, y1 = self.ax.transAxes.transform( # pylint: disable=invalid-name
(1, 1))
value = x1 - x0 if self.x else y1 - y0
return value
@property
def dist_abs(self):
"""Distance abs
"""
bounds = self.ax.get_xlim() if self.x else self.ax.get_ylim()
return bounds[0] - bounds[1]
@property
def value(self):
"""Return value.
"""
return (self.size / self.dist_real) * self.dist_abs
def __mul__(self, obj):
return self.value * obj
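# Worked example (assumed numbers, for illustration only): on an axes whose
# x-limits span 10 data units rendered over 500 display pixels,
# _GraphDist(24, ax, True) has magnitude 24 / 500 * 10 = 0.48 data units
# (the sign follows the axis orientation), so a 24-pixel qubit marker keeps
# the same on-screen size even when the axes are not square.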
def plot_gate_map(backend, figsize=None,
plot_directed=False,
label_qubits=True,
qubit_size=24,
line_width=4,
font_size=12,
qubit_color=None,
qubit_labels=None,
line_color=None,
font_color='w',
ax=None):
"""Plots the gate map of a device.
Args:
backend (BaseBackend): A backend instance.
figsize (tuple): Output figure size (wxh) in inches.
plot_directed (bool): Plot directed coupling map.
label_qubits (bool): Label the qubits.
qubit_size (float): Size of qubit marker.
line_width (float): Width of lines.
font_size (int): Font size of qubit labels.
qubit_color (list): A list of colors for the qubits
qubit_labels (list): A list of qubit labels
line_color (list): A list of colors for each line from coupling_map.
font_color (str): The font color for the qubit labels.
ax (Axes): A Matplotlib axes instance.
Returns:
Figure: A Matplotlib figure instance.
Raises:
QiskitError: if tried to pass a simulator.
ImportError: if matplotlib not installed.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_gate_map
%matplotlib inline
provider = IBMQ.load_account()
accountProvider = IBMQ.get_provider(hub='ibm-q')
backend = accountProvider.get_backend('ibmq_vigo')
plot_gate_map(backend)
"""
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
from matplotlib import get_backend
import matplotlib.pyplot as plt # pylint: disable=import-error
import matplotlib.patches as mpatches
if backend.configuration().simulator:
raise QiskitError('Requires a device backend, not simulator.')
input_axes = False
if ax:
input_axes = True
mpl_data = {}
mpl_data[1] = [[0, 0]]
mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
mpl_data[7] = [[0, 0], [0, 1], [0, 2],
[1, 1],
[2, 0], [2, 1], [2, 2]]
mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [1, 4],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]
mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [1, 7], [1, 6], [1, 5],
[1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]
mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3],
[0, 4], [0, 5], [0, 6], [0, 7], [1, 7],
[1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]
mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],
[3, 2], [0, 3], [1, 3], [3, 3], [4, 3],
[1, 4], [3, 4], [1, 5], [2, 5], [3, 5],
[1, 6], [3, 6], [0, 7], [1, 7], [3, 7],
[4, 7], [1, 8], [3, 8], [1, 9], [2, 9],
[3, 9], [3, 10]]
mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8]]
mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8],
[5, 2], [5, 6],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8],
[7, 0], [7, 4], [7, 8],
[8, 0], [8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8],
[9, 2], [9, 6]]
mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [0, 7], [0, 8], [0, 9],
[1, 0], [1, 4], [1, 8],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10],
[3, 2], [3, 6], [3, 10],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10],
[5, 0], [5, 4], [5, 8],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10],
[7, 2], [7, 6], [7, 10],
[8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]
config = backend.configuration()
num_qubits = config.n_qubits
cmap = config.coupling_map
if qubit_labels is None:
qubit_labels = list(range(num_qubits))
else:
if len(qubit_labels) != num_qubits:
raise QiskitError('Length of qubit labels '
'does not equal number '
'of qubits.')
if num_qubits in mpl_data.keys():
grid_data = mpl_data[num_qubits]
else:
if not input_axes:
fig, ax = plt.subplots(figsize=(5, 5)) # pylint: disable=invalid-name
ax.axis('off')
return fig
x_max = max([d[1] for d in grid_data])
y_max = max([d[0] for d in grid_data])
max_dim = max(x_max, y_max)
if figsize is None:
if num_qubits == 1 or (x_max / max_dim > 0.33 and y_max / max_dim > 0.33):
figsize = (5, 5)
else:
figsize = (9, 3)
if ax is None:
fig, ax = plt.subplots(figsize=figsize) # pylint: disable=invalid-name
ax.axis('off')
# set coloring
if qubit_color is None:
qubit_color = ['#648fff'] * config.n_qubits
if line_color is None:
line_color = ['#648fff'] * len(cmap) if cmap else []
# Add lines for couplings
if num_qubits != 1:
for ind, edge in enumerate(cmap):
is_symmetric = False
if edge[::-1] in cmap:
is_symmetric = True
y_start = grid_data[edge[0]][0]
x_start = grid_data[edge[0]][1]
y_end = grid_data[edge[1]][0]
x_end = grid_data[edge[1]][1]
if is_symmetric:
if y_start == y_end:
x_end = (x_end - x_start) / 2 + x_start
elif x_start == x_end:
y_end = (y_end - y_start) / 2 + y_start
else:
x_end = (x_end - x_start) / 2 + x_start
y_end = (y_end - y_start) / 2 + y_start
ax.add_artist(plt.Line2D([x_start, x_end], [-y_start, -y_end],
color=line_color[ind], linewidth=line_width,
zorder=0))
if plot_directed:
dx = x_end - x_start # pylint: disable=invalid-name
dy = y_end - y_start # pylint: disable=invalid-name
if is_symmetric:
x_arrow = x_start + dx * 0.95
y_arrow = -y_start - dy * 0.95
dx_arrow = dx * 0.01
dy_arrow = -dy * 0.01
head_width = 0.15
else:
x_arrow = x_start + dx * 0.5
y_arrow = -y_start - dy * 0.5
dx_arrow = dx * 0.2
dy_arrow = -dy * 0.2
head_width = 0.2
ax.add_patch(mpatches.FancyArrow(x_arrow,
y_arrow,
dx_arrow,
dy_arrow,
head_width=head_width,
length_includes_head=True,
edgecolor=None,
linewidth=0,
facecolor=line_color[ind],
zorder=1))
# Add circles for qubits
for var, idx in enumerate(grid_data):
_idx = [idx[1], -idx[0]]
width = _GraphDist(qubit_size, ax, True)
height = _GraphDist(qubit_size, ax, False)
ax.add_artist(mpatches.Ellipse(
_idx, width, height, color=qubit_color[var], zorder=1))
if label_qubits:
ax.text(*_idx, s=qubit_labels[var],
horizontalalignment='center',
verticalalignment='center',
color=font_color, size=font_size, weight='bold')
ax.set_xlim([-1, x_max + 1])
ax.set_ylim([-(y_max + 1), 1])
if not input_axes:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
return None
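# Offline sketch (assumption: a fake backend such as qiskit.test.mock's
# FakeVigo is available in this version; it avoids needing an IBMQ account):
#
#   from qiskit.test.mock import FakeVigo
#   fig = plot_gate_map(FakeVigo(), plot_directed=True)
#   fig.savefig('vigo_gate_map.png')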
def plot_circuit_layout(circuit, backend, view='virtual'):
"""Plot the layout of a circuit transpiled for a given
target backend.
Args:
circuit (QuantumCircuit): Input quantum circuit.
backend (BaseBackend): Target backend.
view (str): Layout view: either 'virtual' or 'physical'.
Returns:
Figure: A matplotlib figure showing layout.
Raises:
QiskitError: Invalid view type given.
VisualizationError: Circuit has no layout attribute.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit, IBMQ, transpile
from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
%matplotlib inline
IBMQ.load_account()
ghz = QuantumCircuit(3, 3)
ghz.h(0)
for idx in range(1,3):
ghz.cx(0,idx)
ghz.measure(range(3), range(3))
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)
plot_circuit_layout(new_circ_lv3, backend)
"""
if circuit._layout is None:
raise QiskitError('Circuit has no layout. '
'Perhaps it has not been transpiled.')
num_qubits = backend.configuration().n_qubits
qubits = []
qubit_labels = [None] * num_qubits
if view == 'virtual':
for key, val in circuit._layout.get_virtual_bits().items():
if key.register.name != 'ancilla':
qubits.append(val)
qubit_labels[val] = key.index
elif view == 'physical':
for key, val in circuit._layout.get_physical_bits().items():
if val.register.name != 'ancilla':
qubits.append(key)
qubit_labels[key] = key
else:
raise VisualizationError("Layout view must be 'virtual' or 'physical'.")
qcolors = ['#648fff'] * num_qubits
for k in qubits:
qcolors[k] = 'k'
cmap = backend.configuration().coupling_map
lcolors = ['#648fff'] * len(cmap)
for idx, edge in enumerate(cmap):
if edge[0] in qubits and edge[1] in qubits:
lcolors[idx] = 'k'
fig = plot_gate_map(backend,
qubit_color=qcolors,
qubit_labels=qubit_labels,
line_color=lcolors)
return fig
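# The two views only change coloring and labeling (summary of the code
# above): 'virtual' labels the used physical qubits with the circuit's
# virtual indices, 'physical' labels them with their own hardware indices.
#
#   fig_virt = plot_circuit_layout(new_circ_lv3, backend, view='virtual')
#   fig_phys = plot_circuit_layout(new_circ_lv3, backend, view='physical')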
def plot_error_map(backend, figsize=(12, 9), show_title=True):
"""Plots the error map of a given backend.
Args:
backend (IBMQBackend): Given backend.
figsize (tuple): Figure size in inches.
show_title (bool): Show the title or not.
Returns:
Figure: A matplotlib figure showing error map.
Raises:
VisualizationError: Input is not IBMQ backend.
ImportError: If seaborn is not installed
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_error_map
%matplotlib inline
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
plot_error_map(backend)
"""
try:
import seaborn as sns
except ImportError:
raise ImportError('Must have seaborn installed to use plot_error_map. '
'To install, run "pip install seaborn".')
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
import matplotlib
from matplotlib import get_backend
import matplotlib.pyplot as plt # pylint: disable=import-error
import matplotlib.gridspec as gridspec
from matplotlib import ticker
color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
num_qubits = config['n_qubits']
# U2 error rates
single_gate_errors = [0]*num_qubits
for gate in props['gates']:
if gate['gate'] == 'u2':
_qubit = gate['qubits'][0]
single_gate_errors[_qubit] = gate['parameters'][0]['value']
# Convert to percent
single_gate_errors = 100 * np.asarray(single_gate_errors)
avg_1q_err = np.mean(single_gate_errors)
single_norm = matplotlib.colors.Normalize(
vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [color_map(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
directed = False
line_colors = []
if cmap:
directed = False
if num_qubits < 20:
for edge in cmap:
if [edge[1], edge[0]] not in cmap:
directed = True
break
cx_errors = []
for line in cmap:
for item in props['gates']:
if item['qubits'] == line:
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
# Convert to percent
cx_errors = 100 * np.asarray(cx_errors)
avg_cx_err = np.mean(cx_errors)
cx_norm = matplotlib.colors.Normalize(
vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [color_map(cx_norm(err)) for err in cx_errors]
# Measurement errors
read_err = []
for qubit in range(num_qubits):
for item in props['qubits'][qubit]:
if item['name'] == 'readout_error':
read_err.append(item['value'])
read_err = 100 * np.asarray(read_err)
avg_read_err = np.mean(read_err)
max_read_err = np.max(read_err)
fig = plt.figure(figsize=figsize)
grid_spec = gridspec.GridSpec(12, 12, height_ratios=[1] * 11 + [0.5],
width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])
left_ax = plt.subplot(grid_spec[2:10, :1])
main_ax = plt.subplot(grid_spec[:11, 1:11])
right_ax = plt.subplot(grid_spec[2:10, 11:])
bleft_ax = plt.subplot(grid_spec[-1, :5])
if cmap:
bright_ax = plt.subplot(grid_spec[-1, 7:])
plot_gate_map(backend, qubit_color=q_colors,
line_color=line_colors,
qubit_size=28,
line_width=5,
plot_directed=directed,
ax=main_ax)
main_ax.axis('off')
main_ax.set_aspect(1)
if cmap:
single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map,
norm=single_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
single_cb.locator = tick_locator
single_cb.update_ticks()
bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))
if cmap is None:
bleft_ax.axis('off')
bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))
if cmap:
cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map,
norm=cx_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
cx_cb.locator = tick_locator
cx_cb.update_ticks()
bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))
if num_qubits < 10:
num_left = num_qubits
num_right = 0
else:
num_left = math.ceil(num_qubits / 2)
num_right = num_qubits - num_left
left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
left_ax.set_yticks(range(num_left))
left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
left_ax.invert_yaxis()
left_ax.set_title('Readout Error (%)', fontsize=12)
for spine in left_ax.spines.values():
spine.set_visible(False)
if num_right:
right_ax.barh(range(num_left, num_qubits), read_err[num_left:],
align='center', color='#DDBBBA')
right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
right_ax.set_yticks(range(num_left, num_qubits))
right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)],
fontsize=12)
right_ax.invert_yaxis()
right_ax.invert_xaxis()
right_ax.yaxis.set_label_position("right")
right_ax.yaxis.tick_right()
right_ax.set_title('Readout Error (%)', fontsize=12)
else:
right_ax.axis('off')
for spine in right_ax.spines.values():
spine.set_visible(False)
if show_title:
fig.suptitle('{name} Error Map'.format(name=backend.name()),
fontsize=24, y=0.9)
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
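# Offline sketch for the error map as well (same FakeVigo assumption as
# above; seaborn must also be installed):
#
#   from qiskit.test.mock import FakeVigo
#   fig = plot_error_map(FakeVigo(), figsize=(9, 7), show_title=False)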
| 35.754622
| 95
| 0.512363
|
import math
import numpy as np
from qiskit.exceptions import QiskitError
from .matplotlib import HAS_MATPLOTLIB
from .exceptions import VisualizationError
class _GraphDist():
def __init__(self, size, ax, x=True):
self.size = size
self.ax = ax
self.x = x
@property
def dist_real(self):
x0, y0 = self.ax.transAxes.transform(
(0, 0))
x1, y1 = self.ax.transAxes.transform(
(1, 1))
value = x1 - x0 if self.x else y1 - y0
return value
@property
def dist_abs(self):
bounds = self.ax.get_xlim() if self.x else self.ax.get_ylim()
return bounds[0] - bounds[1]
@property
def value(self):
return (self.size / self.dist_real) * self.dist_abs
def __mul__(self, obj):
return self.value * obj
def plot_gate_map(backend, figsize=None,
plot_directed=False,
label_qubits=True,
qubit_size=24,
line_width=4,
font_size=12,
qubit_color=None,
qubit_labels=None,
line_color=None,
font_color='w',
ax=None):
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if backend.configuration().simulator:
raise QiskitError('Requires a device backend, not simulator.')
input_axes = False
if ax:
input_axes = True
mpl_data = {}
mpl_data[1] = [[0, 0]]
mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]
mpl_data[7] = [[0, 0], [0, 1], [0, 2],
[1, 1],
[2, 0], [2, 1], [2, 2]]
mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [1, 4],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]
mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [1, 7], [1, 6], [1, 5],
[1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]
mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3],
[0, 4], [0, 5], [0, 6], [0, 7], [1, 7],
[1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]
mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],
[3, 2], [0, 3], [1, 3], [3, 3], [4, 3],
[1, 4], [3, 4], [1, 5], [2, 5], [3, 5],
[1, 6], [3, 6], [0, 7], [1, 7], [3, 7],
[4, 7], [1, 8], [3, 8], [1, 9], [2, 9],
[3, 9], [3, 10]]
mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8]]
mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 6],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8],
[3, 0], [3, 4], [3, 8],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8],
[5, 2], [5, 6],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8],
[7, 0], [7, 4], [7, 8],
[8, 0], [8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8],
[9, 2], [9, 6]]
mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[0, 5], [0, 6], [0, 7], [0, 8], [0, 9],
[1, 0], [1, 4], [1, 8],
[2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
[2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10],
[3, 2], [3, 6], [3, 10],
[4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
[4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10],
[5, 0], [5, 4], [5, 8],
[6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
[6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10],
[7, 2], [7, 6], [7, 10],
[8, 1], [8, 2], [8, 3], [8, 4],
[8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]
config = backend.configuration()
num_qubits = config.n_qubits
cmap = config.coupling_map
if qubit_labels is None:
qubit_labels = list(range(num_qubits))
else:
if len(qubit_labels) != num_qubits:
raise QiskitError('Length of qubit labels '
'does not equal number '
'of qubits.')
if num_qubits in mpl_data.keys():
grid_data = mpl_data[num_qubits]
else:
if not input_axes:
fig, ax = plt.subplots(figsize=(5, 5))
ax.axis('off')
return fig
x_max = max([d[1] for d in grid_data])
y_max = max([d[0] for d in grid_data])
max_dim = max(x_max, y_max)
if figsize is None:
if num_qubits == 1 or (x_max / max_dim > 0.33 and y_max / max_dim > 0.33):
figsize = (5, 5)
else:
figsize = (9, 3)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.axis('off')
if qubit_color is None:
qubit_color = ['#648fff'] * config.n_qubits
if line_color is None:
line_color = ['#648fff'] * len(cmap) if cmap else []
if num_qubits != 1:
for ind, edge in enumerate(cmap):
is_symmetric = False
if edge[::-1] in cmap:
is_symmetric = True
y_start = grid_data[edge[0]][0]
x_start = grid_data[edge[0]][1]
y_end = grid_data[edge[1]][0]
x_end = grid_data[edge[1]][1]
if is_symmetric:
if y_start == y_end:
x_end = (x_end - x_start) / 2 + x_start
elif x_start == x_end:
y_end = (y_end - y_start) / 2 + y_start
else:
x_end = (x_end - x_start) / 2 + x_start
y_end = (y_end - y_start) / 2 + y_start
ax.add_artist(plt.Line2D([x_start, x_end], [-y_start, -y_end],
color=line_color[ind], linewidth=line_width,
zorder=0))
if plot_directed:
dx = x_end - x_start
dy = y_end - y_start
if is_symmetric:
x_arrow = x_start + dx * 0.95
y_arrow = -y_start - dy * 0.95
dx_arrow = dx * 0.01
dy_arrow = -dy * 0.01
head_width = 0.15
else:
x_arrow = x_start + dx * 0.5
y_arrow = -y_start - dy * 0.5
dx_arrow = dx * 0.2
dy_arrow = -dy * 0.2
head_width = 0.2
ax.add_patch(mpatches.FancyArrow(x_arrow,
y_arrow,
dx_arrow,
dy_arrow,
head_width=head_width,
length_includes_head=True,
edgecolor=None,
linewidth=0,
facecolor=line_color[ind],
zorder=1))
for var, idx in enumerate(grid_data):
_idx = [idx[1], -idx[0]]
width = _GraphDist(qubit_size, ax, True)
height = _GraphDist(qubit_size, ax, False)
ax.add_artist(mpatches.Ellipse(
_idx, width, height, color=qubit_color[var], zorder=1))
if label_qubits:
ax.text(*_idx, s=qubit_labels[var],
horizontalalignment='center',
verticalalignment='center',
color=font_color, size=font_size, weight='bold')
ax.set_xlim([-1, x_max + 1])
ax.set_ylim([-(y_max + 1), 1])
if not input_axes:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
return None
def plot_circuit_layout(circuit, backend, view='virtual'):
if circuit._layout is None:
raise QiskitError('Circuit has no layout. '
'Perhaps it has not been transpiled.')
num_qubits = backend.configuration().n_qubits
qubits = []
qubit_labels = [None] * num_qubits
if view == 'virtual':
for key, val in circuit._layout.get_virtual_bits().items():
if key.register.name != 'ancilla':
qubits.append(val)
qubit_labels[val] = key.index
elif view == 'physical':
for key, val in circuit._layout.get_physical_bits().items():
if val.register.name != 'ancilla':
qubits.append(key)
qubit_labels[key] = key
else:
raise VisualizationError("Layout view must be 'virtual' or 'physical'.")
qcolors = ['#648fff'] * num_qubits
for k in qubits:
qcolors[k] = 'k'
cmap = backend.configuration().coupling_map
lcolors = ['#648fff'] * len(cmap)
for idx, edge in enumerate(cmap):
if edge[0] in qubits and edge[1] in qubits:
lcolors[idx] = 'k'
fig = plot_gate_map(backend,
qubit_color=qcolors,
qubit_labels=qubit_labels,
line_color=lcolors)
return fig
def plot_error_map(backend, figsize=(12, 9), show_title=True):
try:
import seaborn as sns
except ImportError:
raise ImportError('Must have seaborn installed to use plot_error_map. '
'To install, run "pip install seaborn".')
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed. To install, '
'run "pip install matplotlib".')
import matplotlib
from matplotlib import get_backend
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import ticker
color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
num_qubits = config['n_qubits']
single_gate_errors = [0]*num_qubits
for gate in props['gates']:
if gate['gate'] == 'u2':
_qubit = gate['qubits'][0]
single_gate_errors[_qubit] = gate['parameters'][0]['value']
single_gate_errors = 100 * np.asarray(single_gate_errors)
avg_1q_err = np.mean(single_gate_errors)
single_norm = matplotlib.colors.Normalize(
vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [color_map(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
directed = False
line_colors = []
if cmap:
directed = False
if num_qubits < 20:
for edge in cmap:
if [edge[1], edge[0]] not in cmap:
directed = True
break
cx_errors = []
for line in cmap:
for item in props['gates']:
if item['qubits'] == line:
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
cx_errors = 100 * np.asarray(cx_errors)
avg_cx_err = np.mean(cx_errors)
cx_norm = matplotlib.colors.Normalize(
vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [color_map(cx_norm(err)) for err in cx_errors]
read_err = []
for qubit in range(num_qubits):
for item in props['qubits'][qubit]:
if item['name'] == 'readout_error':
read_err.append(item['value'])
read_err = 100 * np.asarray(read_err)
avg_read_err = np.mean(read_err)
max_read_err = np.max(read_err)
fig = plt.figure(figsize=figsize)
grid_spec = gridspec.GridSpec(12, 12, height_ratios=[1] * 11 + [0.5],
width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])
left_ax = plt.subplot(grid_spec[2:10, :1])
main_ax = plt.subplot(grid_spec[:11, 1:11])
right_ax = plt.subplot(grid_spec[2:10, 11:])
bleft_ax = plt.subplot(grid_spec[-1, :5])
if cmap:
bright_ax = plt.subplot(grid_spec[-1, 7:])
plot_gate_map(backend, qubit_color=q_colors,
line_color=line_colors,
qubit_size=28,
line_width=5,
plot_directed=directed,
ax=main_ax)
main_ax.axis('off')
main_ax.set_aspect(1)
if cmap:
single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map,
norm=single_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
single_cb.locator = tick_locator
single_cb.update_ticks()
bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))
if cmap is None:
bleft_ax.axis('off')
bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))
if cmap:
cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map,
norm=cx_norm,
orientation='horizontal')
tick_locator = ticker.MaxNLocator(nbins=5)
cx_cb.locator = tick_locator
cx_cb.update_ticks()
bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))
if num_qubits < 10:
num_left = num_qubits
num_right = 0
else:
num_left = math.ceil(num_qubits / 2)
num_right = num_qubits - num_left
left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
left_ax.set_yticks(range(num_left))
left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
left_ax.invert_yaxis()
left_ax.set_title('Readout Error (%)', fontsize=12)
for spine in left_ax.spines.values():
spine.set_visible(False)
if num_right:
right_ax.barh(range(num_left, num_qubits), read_err[num_left:],
align='center', color='#DDBBBA')
right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
right_ax.set_yticks(range(num_left, num_qubits))
right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)],
fontsize=12)
right_ax.invert_yaxis()
right_ax.invert_xaxis()
right_ax.yaxis.set_label_position("right")
right_ax.yaxis.tick_right()
right_ax.set_title('Readout Error (%)', fontsize=12)
else:
right_ax.axis('off')
for spine in right_ax.spines.values():
spine.set_visible(False)
if show_title:
fig.suptitle('{name} Error Map'.format(name=backend.name()),
fontsize=24, y=0.9)
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(fig)
return fig
| true
| true
|
7907234ce747ad9312d8cc9fd355b23c721cfba2
| 21,323
|
py
|
Python
|
quex/input/files/specifier/counter.py
|
Liby99/quex
|
45f3d21d5df3307376e175cca2d8473e26cb5622
|
[
"MIT"
] | null | null | null |
quex/input/files/specifier/counter.py
|
Liby99/quex
|
45f3d21d5df3307376e175cca2d8473e26cb5622
|
[
"MIT"
] | 1
|
2022-01-31T18:08:44.000Z
|
2022-01-31T18:08:44.000Z
|
quex/input/files/specifier/counter.py
|
raccoonmonk/quex
|
20ffe451df9fd49bdc216ce45b8263fa228670e5
|
[
"MIT"
] | null | null | null |
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
from quex.input.setup import NotificationDB
from quex.input.regular_expression.pattern import Pattern_Prep
import quex.input.regular_expression.core as regular_expression
from quex.input.code.base import SourceRef, \
SourceRef_DEFAULT, \
SourceRefObject
from quex.engine.state_machine.core import DFA
import quex.engine.state_machine.construction.sequentialize as sequentialize
import quex.engine.state_machine.construction.repeat as repeat
import quex.engine.state_machine.algebra.difference as difference
import quex.engine.state_machine.algebra.intersection as intersection
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.check.swallow as swallow
import quex.engine.state_machine.check.outrun as outrun
import quex.engine.state_machine.check.identity as identity
import quex.engine.state_machine.check.tail as tail
from quex.engine.misc.tools import typed
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.counter import IndentationCount_Pre, \
cc_type_name_db, \
cc_type_db
from quex.engine.counter_builder import CountActionMap_Builder
import quex.engine.misc.error as error
import quex.engine.misc.error_check as error_check
from quex.engine.misc.file_in import check, \
check_or_die, \
skip_whitespace, \
read_identifier, \
read_integer
from quex.constants import E_CharacterCountType
from quex.blackboard import setup as Setup
def parse_CountActionMap(fh):
return _base_parse(fh, CountActionMapFromParser_Builder(fh))
def parse_IndentationSetup(fh):
return _base_parse(fh, IndentationSetup_Builder(fh))
def _base_parse(fh, builder, IndentationSetupF=False):
"""Parses pattern definitions of the form:
[ \t] => grid 4;
[:intersection([:alpha:], [\X064-\X066]):] => columns 1;
In other words the right hand side *must* be a character set.
ADAPTS: result to contain parsing information.
"""
# NOTE: Catching of EOF happens in caller: parse_section(...)
#
while True:
skip_whitespace(fh)
if check(fh, ">"):
break
# A regular expression state machine
pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list)
if pattern is None and not builder.keyword_else_f:
error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
# '_parse_definition_head()' ensures that only identifiers mentioned in
# 'result' are accepted.
if builder.requires_count():
count = _read_value_specifier(fh, identifier, 1)
builder.specify(identifier, pattern, count, sr)
else:
builder.specify(identifier, pattern, sr)
if not check(fh, ";"):
error.log("Missing ';' after '%s' specification." % identifier, fh)
return builder.finalize()
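# Hedged example of the body this loop consumes, pieced together from the
# docstring above and the identifier lists of the two builders below (any
# framing before the body is not shown; the loop stops at '>'):
#
#    [\t]   => grid 4;
#    [\n]   => lines 1;
#    \else  => columns 1;
#   >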
class CharacterSetVsAction_BuilderBase:
def __init__(self, IdentifierList, KeywordElseAdmissibleF):
self.identifier_list = IdentifierList
self.keyword_else_f = KeywordElseAdmissibleF
class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase):
"""Line/column number count specification.
___________________________________________________________________________
The main result of parsing is the Base's .count_command_map, which is
an instance of CountActionMap_Builder.
____________________________________________________________________________
"""
@typed(sr=SourceRef)
def __init__(self, fh):
self.sr = SourceRef.from_FileHandle(fh)
self.__fh = fh
self._ca_map_builder = CountActionMap_Builder()
CharacterSetVsAction_BuilderBase.__init__(self,
("columns", "grid", "lines"),
KeywordElseAdmissibleF=True)
def finalize(self):
# Finalize / Produce 'LineColumnCount' object.
#
ca_map = self._ca_map_builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(),
self.sr)
_check_grid_values_integer_multiples(ca_map)
check_defined(ca_map, self.sr, E_CharacterCountType.LINE)
return ca_map
def requires_count(self):
return True
@typed(sr=SourceRef, Identifier=str)
def specify(self, Identifier, Pattern, Count, sr):
if Pattern is None:
self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr)
else:
trigger_set = _extract_trigger_set(sr, Identifier, Pattern)
self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr)
class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase):
"""Indentation counter specification.
____________________________________________________________________________
The base's .count_command_map contains information about how to count the
space at the beginning of the line. The count until the first non-whitespace
is the 'indentation'.
+bad:
The spec contains information about what characters are not supposed to
appear in indentation (bad characters). Depending on the philosophical
basis, some might consider 'space' as evil, others consider 'tab' as evil.
+newline:
A detailed state machine can be defined for 'newline'. This might be
'\n|(\r\n)' or more complex things.
+suppressor:
A newline might be suppressed by '\' for example. For that, it might be
specified as 'newline suppressor'.
____________________________________________________________________________
"""
@typed(sr=SourceRef)
def __init__(self, fh):
self.__fh = fh
self.sm_whitespace = SourceRefObject("whitespace", None)
self.sm_badspace = SourceRefObject("bad", None)
self.sm_newline = SourceRefObject("newline", None)
self.sm_newline_suppressor = SourceRefObject("suppressor", None)
self.sm_suspend_list = []
if fh == -1: self.sr = SourceRef_DEFAULT
else: self.sr = SourceRef.from_FileHandle(self.__fh)
CharacterSetVsAction_BuilderBase.__init__(self,
("whitespace", "suspend", "newline", "suppressor", "bad"),
KeywordElseAdmissibleF=False)
def finalize(self):
# Finalize / Produce 'IndentationCount' object.
#
if self.sm_whitespace.get() is None:
self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT)
if self.sm_newline.get() is None:
self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT)
# -- consistency
self._consistency_check()
# Transform 'SourceRefObject' into 'Pattern_Prep' objects
# (TODO: Why not use it in the first place?)
def get_pattern(SRO):
if SRO is None or SRO.get() is None: return None
return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr)
pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ]
pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ]
if self.sm_newline_suppressor.set_f():
sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(),
self.sm_newline.get()])
sm_suppressed_newline = beautifier.do(sm_suppressed_newline)
pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline,
PatternString="<indentation suppressed newline>",
Sr=self.sm_newline_suppressor.sr)
else:
pattern_suppressed_newline = None
return IndentationCount_Pre(self.sr,
get_pattern(self.sm_whitespace),
get_pattern(self.sm_badspace),
get_pattern(self.sm_newline),
pattern_suppressed_newline,
pattern_suspend_list)
def requires_count(self):
return False
def specify(self, identifier, pattern, sr):
sm = pattern.extract_sm()
if identifier == "whitespace":
self.__specify(self.sm_whitespace, sm, sr)
elif identifier == "bad":
self.__specify(self.sm_badspace, sm, sr)
elif identifier == "newline":
self.__specify(self.sm_newline, sm, sr)
elif identifier == "suppressor":
self.__specify(self.sm_newline_suppressor, sm , sr)
elif identifier == "suspend":
self.__specify_suspend(sm, sr)
else:
return False
return True
@typed(sr=SourceRef)
def __specify(self, member_ref, Sm, sr):
assert Sm is not None
_error_if_defined_before(member_ref, sr)
if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm)
member_ref.set(Sm, sr)
@typed(sr=SourceRef)
def __specify_suspend(self, Sm, sr):
for before in self.sm_suspend_list:
if not identity.do(before.get(), Sm): continue
error.log("'suspend' has been defined before;", sr, DontExitF=True)
error.log("at this place.", before.sr)
sm_suspend = SourceRefObject("suspend", None)
self.__specify(sm_suspend, Sm, sr)
self.sm_suspend_list.append(sm_suspend)
def __sm_newline_default(self):
"""Default newline: '(\n)|(\r\n)'
"""
sm = DFA.from_character_set(NumberSet(ord('\n')))
if Setup.dos_carriage_return_newline_f:
sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
return sm
def __sm_whitespace_default(self):
"""Try to define default whitespace ' ' or '\t' if their positions
are not yet occupied in the count_command_map.
"""
sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
if self.sm_badspace.get() is not None:
sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
if sm_whitespace.is_Empty() \
or outrun.do(self.sm_badspace.get(), sm_whitespace):
error.log("Cannot define default 'whitespace' in the frame of the given\n"
"definition of 'bad'.", self.sm_badspace.sr)
return sm_whitespace
def _consistency_check(self):
"""
Required definitions:
-- WHITESPACE (Default done automatically) => Assert.
-- NEWLINE (Default done automatically) => Assert.
Inadmissible 'eat-into'.
-- SUPPRESSOR shall not eat into [NEWLINE]
-- NEWLINE shall not eat into [WHITESPACE, BADSPACE, SUSPEND, SUPPRESSOR]
-- WHITESPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
-- BADSPACE shall not eat into [SUPPRESSOR, NEWLINE, SUSPEND].
No common lexemes:
-- WHITESPACE and BADSPACE may not have common lexemes.
Outrun:
-- NEWLINE may not start with SUSPEND and vice versa
-- NEWLINE may not start with SUPPRESSOR and vice versa
-- SUPPRESSOR may not start with SUSPEND and vice versa
-- WHITESPACE shall not outrun BADSPACE, but the contrary is ok.
(BADSPACE may outrun WHITESPACE, e.g. a lexeme with 'tab' after whitespace.)
"""
# (1) Required definitions _____________________________________________
assert self.sm_whitespace.set_f()
assert self.sm_newline.set_f()
whitespace = self.sm_whitespace
newline = self.sm_newline
badspace = self.sm_badspace
suppressor = self.sm_newline_suppressor
suspend_list = self.sm_suspend_list
# (2) Inadmissible 'eat-into' __________________________________________
#
cmp_list = [
(newline, badspace), (newline, whitespace), (newline, suppressor),
(suppressor, newline),
(whitespace, newline), (whitespace, suppressor),
(badspace, newline), (badspace, suppressor),
] \
+ [ (whitespace, x) for x in suspend_list ] \
+ [ (newline, x) for x in suspend_list ] \
+ [ (badspace, x) for x in suspend_list ]
def _error(FormatStr, Sro0, Sro1):
error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True)
error.log("'%s' defined here." % Sro1.name, Sro1.sr)
def _iterate(SroPairList):
for first_sro, second_sro in cmp_list:
first, second = first_sro.get(), second_sro.get()
if first is None or second is None: continue
yield first_sro, first, second_sro, second
for first_sro, first, second_sro, second in _iterate(cmp_list):
if swallow.ending_A_beginning_B(first, second):
_error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
elif swallow.inside_A_match_B(first, second):
_error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
for sm_suspend in self.sm_suspend_list:
only_common_f, \
common_f = tail.do(self.sm_newline.get(), sm_suspend.get())
error_check.tail(only_common_f, common_f,
"indentation handler's newline", self.sm_newline.sr,
"suspend", sm_suspend.sr)
# (3) Inadmissible common lexemes _____________________________________
#
if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty():
_error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
# (3) Inadmissible outruns ____________________________________________
#
cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ]
for x in suspend_list:
cmp_list.extend([
(newline, x), (x, newline),
(suppressor, x), (x, suppressor)
])
for first_sro, first, second_sro, second in _iterate(cmp_list):
if outrun.do(second, first):
_error("'%s' may outrun '%s'.", first_sro, second_sro)
def _parse_definition_head(fh, IdentifierList):
if check(fh, "\\default"):
error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh)
elif check(fh, "\\else"):
pattern = None
else:
pattern = regular_expression.parse(fh, AllowPreContextF=False,
AllowPostContextF=False)
skip_whitespace(fh)
check_or_die(fh, "=>", " after character set definition.")
skip_whitespace(fh)
identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.")
error.verify_word_in_list(identifier, IdentifierList,
"Unrecognized specifier '%s'." % identifier, fh)
skip_whitespace(fh)
return pattern, identifier, SourceRef.from_FileHandle(fh)
def _read_value_specifier(fh, Keyword, Default=None):
skip_whitespace(fh)
value = read_integer(fh)
if value is not None: return value
# not a number received, is it an identifier?
variable = read_identifier(fh)
if variable: return variable
elif Default is not None: return Default
error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh)
__CountActionMap_DEFAULT = None
def LineColumnCount_Default():
global __CountActionMap_DEFAULT
if __CountActionMap_DEFAULT is None:
builder = CountActionMap_Builder()
builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT)
builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT)
builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT) # Define: "\else"
__CountActionMap_DEFAULT = builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(), # Apply: "\else"
SourceRef_DEFAULT)
return __CountActionMap_DEFAULT
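# The default map built above corresponds to the user-level definitions
# (using the syntax parsed earlier in this file):
#
#   [\n]  => lines 1;
#   [\t]  => grid 4;
#   \else => columns 1;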
def _error_if_defined_before(Before, sr):
if not Before.set_f(): return
error.log("'%s' has been defined before;" % Before.name, sr,
DontExitF=True)
error.log("at this place.", Before.sr)
def _extract_trigger_set(sr, Keyword, Pattern):
if Pattern is None:
return None
elif isinstance(Pattern, NumberSet):
return Pattern
def check_can_be_matched_by_single_character(SM):
bad_f = False
init_state = SM.get_init_state()
if SM.get_init_state().is_acceptance():
bad_f = True
elif len(SM.states) != 2:
bad_f = True
# Init state MUST transit to second state. Second state MUST not have any transitions
elif len(init_state.target_map.get_target_state_index_list()) != 1:
bad_f = True
else:
tmp = set(SM.states.keys())
tmp.remove(SM.init_state_index)
other_state_index = next(iter(tmp))
if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0:
bad_f = True
if bad_f:
error.log("For '%s' only patterns are addmissible which\n" % Keyword + \
"can be matched by a single character, e.g. \" \" or [a-z].", sr)
sm = Pattern.extract_sm()
check_can_be_matched_by_single_character(sm)
transition_map = sm.get_init_state().target_map.get_map()
assert len(transition_map) == 1
return list(transition_map.values())[0]
def _check_grid_values_integer_multiples(CaMap):
"""If there are no spaces and the grid is on a homogeneous scale,
=> then the grid can be transformed into 'easy-to-compute' spaces.
"""
grid_value_list = []
min_info = None
for character_set, info in CaMap:
if info.cc_type == E_CharacterCountType.COLUMN:
return
elif info.cc_type != E_CharacterCountType.GRID:
continue
        elif isinstance(info.value, str):
# If there is one single 'variable' grid value,
# then no assumptions can be made.
return
grid_value_list.append(info.value)
if min_info is None or info.value < min_info.value:
min_info = info
if min_info is None:
return
# Are all grid values a multiple of the minimum?
if all(x % min_info.value == 0 for x in grid_value_list):
error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \
"widths are multiples of %i. The grid setup %s is equivalent to\n" \
% (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \
"a setup with space counts %s. Space counts are faster to compute.\n" \
                      % repr([x // min_info.value for x in sorted(grid_value_list)])[1:-1],
min_info.sr)
return
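# Worked example for the check above (hypothetical values): grid widths
# [8, 4] are all multiples of min = 4, so the warning fires and reports
# the equivalent space counts [x // 4 for x in sorted([8, 4])] == [1, 2].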
def check_defined(CaMap, SourceReference, CCT):
"""Checks whether the character counter type has been defined in the
map.
    THROWS: Error in case that it has not been defined.
"""
for character_set, info in CaMap:
if info.cc_type == CCT:
return
error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference,
SuppressCode=NotificationDB.warning_counter_setup_without_newline)
| 44.238589
| 111
| 0.60892
|
from quex.input.setup import NotificationDB
from quex.input.regular_expression.pattern import Pattern_Prep
import quex.input.regular_expression.core as regular_expression
from quex.input.code.base import SourceRef, \
SourceRef_DEFAULT, \
SourceRefObject
from quex.engine.state_machine.core import DFA
import quex.engine.state_machine.construction.sequentialize as sequentialize
import quex.engine.state_machine.construction.repeat as repeat
import quex.engine.state_machine.algebra.difference as difference
import quex.engine.state_machine.algebra.intersection as intersection
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.check.swallow as swallow
import quex.engine.state_machine.check.outrun as outrun
import quex.engine.state_machine.check.identity as identity
import quex.engine.state_machine.check.tail as tail
from quex.engine.misc.tools import typed
from quex.engine.misc.interval_handling import NumberSet
from quex.engine.counter import IndentationCount_Pre, \
cc_type_name_db, \
cc_type_db
from quex.engine.counter_builder import CountActionMap_Builder
import quex.engine.misc.error as error
import quex.engine.misc.error_check as error_check
from quex.engine.misc.file_in import check, \
check_or_die, \
skip_whitespace, \
read_identifier, \
read_integer
from quex.constants import E_CharacterCountType
from quex.blackboard import setup as Setup
def parse_CountActionMap(fh):
return _base_parse(fh, CountActionMapFromParser_Builder(fh))
def parse_IndentationSetup(fh):
return _base_parse(fh, IndentationSetup_Builder(fh))
def _base_parse(fh, builder, IndentationSetupF=False):
while 1 + 1 == 2:
skip_whitespace(fh)
if check(fh, ">"):
break
pattern, identifier, sr = _parse_definition_head(fh, builder.identifier_list)
if pattern is None and not builder.keyword_else_f:
error.log("Keyword '\\else' cannot be used in indentation setup.", fh)
if builder.requires_count():
count = _read_value_specifier(fh, identifier, 1)
builder.specify(identifier, pattern, count, sr)
else:
builder.specify(identifier, pattern, sr)
if not check(fh, ";"):
error.log("Missing ';' after '%s' specification." % identifier, fh)
return builder.finalize()
class CharacterSetVsAction_BuilderBase:
def __init__(self, IdentifierList, KeywordElseAdmissibleF):
self.identifier_list = IdentifierList
self.keyword_else_f = KeywordElseAdmissibleF
class CountActionMapFromParser_Builder(CharacterSetVsAction_BuilderBase):
@typed(sr=SourceRef)
def __init__(self, fh):
self.sr = SourceRef.from_FileHandle(fh)
self.__fh = fh
self._ca_map_builder = CountActionMap_Builder()
CharacterSetVsAction_BuilderBase.__init__(self,
("columns", "grid", "lines"),
KeywordElseAdmissibleF=True)
def finalize(self):
ca_map = self._ca_map_builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(),
self.sr)
_check_grid_values_integer_multiples(ca_map)
check_defined(ca_map, self.sr, E_CharacterCountType.LINE)
return ca_map
def requires_count(self):
return True
    @typed(sr=SourceRef, Identifier=str)
def specify(self, Identifier, Pattern, Count, sr):
if Pattern is None:
self._ca_map_builder.define_else(cc_type_db[Identifier], Count, sr)
else:
trigger_set = _extract_trigger_set(sr, Identifier, Pattern)
self._ca_map_builder.add(trigger_set, cc_type_db[Identifier], Count, sr)
class IndentationSetup_Builder(CharacterSetVsAction_BuilderBase):
@typed(sr=SourceRef)
def __init__(self, fh):
self.__fh = fh
self.sm_whitespace = SourceRefObject("whitespace", None)
self.sm_badspace = SourceRefObject("bad", None)
self.sm_newline = SourceRefObject("newline", None)
self.sm_newline_suppressor = SourceRefObject("suppressor", None)
self.sm_suspend_list = []
if fh == -1: self.sr = SourceRef_DEFAULT
else: self.sr = SourceRef.from_FileHandle(self.__fh)
CharacterSetVsAction_BuilderBase.__init__(self,
("whitespace", "suspend", "newline", "suppressor", "bad"),
KeywordElseAdmissibleF=False)
def finalize(self):
if self.sm_whitespace.get() is None:
self.sm_whitespace.set(self.__sm_whitespace_default(), SourceRef_DEFAULT)
if self.sm_newline.get() is None:
self.sm_newline.set(self.__sm_newline_default(), SourceRef_DEFAULT)
self._consistency_check()
def get_pattern(SRO):
if SRO is None or SRO.get() is None: return None
return Pattern_Prep(SRO.get(), PatternString="<indentation %s>" % SRO.name, Sr=SRO.sr)
pattern_suspend_list = [ get_pattern(sro) for sro in self.sm_suspend_list ]
pattern_suspend_list = [ x for x in pattern_suspend_list if x is not None ]
if self.sm_newline_suppressor.set_f():
sm_suppressed_newline = sequentialize.do([self.sm_newline_suppressor.get(),
self.sm_newline.get()])
sm_suppressed_newline = beautifier.do(sm_suppressed_newline)
pattern_suppressed_newline = Pattern_Prep(sm_suppressed_newline,
PatternString="<indentation suppressed newline>",
Sr=self.sm_newline_suppressor.sr)
else:
pattern_suppressed_newline = None
return IndentationCount_Pre(self.sr,
get_pattern(self.sm_whitespace),
get_pattern(self.sm_badspace),
get_pattern(self.sm_newline),
pattern_suppressed_newline,
pattern_suspend_list)
def requires_count(self):
return False
def specify(self, identifier, pattern, sr):
sm = pattern.extract_sm()
if identifier == "whitespace":
self.__specify(self.sm_whitespace, sm, sr)
elif identifier == "bad":
self.__specify(self.sm_badspace, sm, sr)
elif identifier == "newline":
self.__specify(self.sm_newline, sm, sr)
elif identifier == "suppressor":
self.__specify(self.sm_newline_suppressor, sm , sr)
elif identifier == "suspend":
self.__specify_suspend(sm, sr)
else:
return False
return True
@typed(sr=SourceRef)
def __specify(self, member_ref, Sm, sr):
assert Sm is not None
_error_if_defined_before(member_ref, sr)
if not Sm.is_DFA_compliant(): Sm = beautifier.do(Sm)
member_ref.set(Sm, sr)
@typed(sr=SourceRef)
def __specify_suspend(self, Sm, sr):
for before in self.sm_suspend_list:
if not identity.do(before.get(), Sm): continue
error.log("'suspend' has been defined before;", sr, DontExitF=True)
error.log("at this place.", before.sr)
sm_suspend = SourceRefObject("suspend", None)
self.__specify(sm_suspend, Sm, sr)
self.sm_suspend_list.append(sm_suspend)
def __sm_newline_default(self):
sm = DFA.from_character_set(NumberSet(ord('\n')))
if Setup.dos_carriage_return_newline_f:
sm.add_transition_sequence(sm.init_state_index, [ord('\r'), ord('\n')])
return sm
def __sm_whitespace_default(self):
sm_whitespace = DFA.from_character_set(NumberSet.from_integer_list([ord(' '), ord('\t')]))
sm_whitespace = beautifier.do(repeat.do(sm_whitespace, 1))
if self.sm_badspace.get() is not None:
sm_whitespace = difference.do(sm_whitespace, self.sm_badspace.get())
if sm_whitespace.is_Empty() \
or outrun.do(self.sm_badspace.get(), sm_whitespace):
error.log("Cannot define default 'whitespace' in the frame of the given\n"
"definition of 'bad'.", self.sm_badspace.sr)
return sm_whitespace
def _consistency_check(self):
assert self.sm_whitespace.set_f()
assert self.sm_newline.set_f()
whitespace = self.sm_whitespace
newline = self.sm_newline
badspace = self.sm_badspace
suppressor = self.sm_newline_suppressor
suspend_list = self.sm_suspend_list
cmp_list = [
(newline, badspace), (newline, whitespace), (newline, suppressor),
(suppressor, newline),
(whitespace, newline), (whitespace, suppressor),
(badspace, newline), (badspace, suppressor),
] \
+ [ (whitespace, x) for x in suspend_list ] \
+ [ (newline, x) for x in suspend_list ] \
+ [ (badspace, x) for x in suspend_list ]
def _error(FormatStr, Sro0, Sro1):
error.log(FormatStr % (Sro0.name, Sro1.name), Sro0.sr, DontExitF=True)
error.log("'%s' defined here." % Sro1.name, Sro1.sr)
def _iterate(SroPairList):
for first_sro, second_sro in cmp_list:
first, second = first_sro.get(), second_sro.get()
if first is None or second is None: continue
yield first_sro, first, second_sro, second
for first_sro, first, second_sro, second in _iterate(cmp_list):
if swallow.ending_A_beginning_B(first, second):
_error("'%s' may eat into beginning of '%s'.", first_sro, second_sro)
elif swallow.inside_A_match_B(first, second):
_error("'%s' may swallow something matched by '%s'.", first_sro, second_sro)
for sm_suspend in self.sm_suspend_list:
only_common_f, \
common_f = tail.do(self.sm_newline.get(), sm_suspend.get())
error_check.tail(only_common_f, common_f,
"indentation handler's newline", self.sm_newline.sr,
"suspend", sm_suspend.sr)
# (3) Inadmissible common lexemes _____________________________________
#
if badspace.get() and not intersection.do([badspace.get(), whitespace.get()]).is_Empty():
_error("'%s' and '%s' match on common lexemes.", whitespace, badspace)
# (3) Inadmissible outruns ____________________________________________
#
cmp_list = [ (newline, suppressor), (suppressor, newline), (whitespace, badspace) ]
for x in suspend_list:
cmp_list.extend([
(newline, x), (x, newline),
(suppressor, x), (x, suppressor)
])
for first_sro, first, second_sro, second in _iterate(cmp_list):
if outrun.do(second, first):
_error("'%s' may outrun '%s'.", first_sro, second_sro)
def _parse_definition_head(fh, IdentifierList):
if check(fh, "\\default"):
error.log("'\\default' has been replaced by keyword '\\else' since quex 0.64.9!", fh)
elif check(fh, "\\else"):
pattern = None
else:
pattern = regular_expression.parse(fh, AllowPreContextF=False,
AllowPostContextF=False)
skip_whitespace(fh)
check_or_die(fh, "=>", " after character set definition.")
skip_whitespace(fh)
identifier = read_identifier(fh, OnMissingStr="Missing identifier following '=>'.")
error.verify_word_in_list(identifier, IdentifierList,
"Unrecognized specifier '%s'." % identifier, fh)
skip_whitespace(fh)
return pattern, identifier, SourceRef.from_FileHandle(fh)
def _read_value_specifier(fh, Keyword, Default=None):
skip_whitespace(fh)
value = read_integer(fh)
if value is not None: return value
# not a number received, is it an identifier?
variable = read_identifier(fh)
if variable: return variable
elif Default is not None: return Default
error.log("Missing integer or variable name after keyword '%s'." % Keyword, fh)
__CountActionMap_DEFAULT = None
def LineColumnCount_Default():
global __CountActionMap_DEFAULT
if __CountActionMap_DEFAULT is None:
builder = CountActionMap_Builder()
builder.add(NumberSet(ord('\n')), E_CharacterCountType.LINE, 1, SourceRef_DEFAULT)
builder.add(NumberSet(ord('\t')), E_CharacterCountType.GRID, 4, SourceRef_DEFAULT)
builder.define_else(E_CharacterCountType.COLUMN, 1, SourceRef_DEFAULT) # Define: "\else"
__CountActionMap_DEFAULT = builder.finalize(
Setup.buffer_encoding.source_set.minimum(),
Setup.buffer_encoding.source_set.least_greater_bound(), # Apply: "\else"
SourceRef_DEFAULT)
return __CountActionMap_DEFAULT
def _error_if_defined_before(Before, sr):
if not Before.set_f(): return
error.log("'%s' has been defined before;" % Before.name, sr,
DontExitF=True)
error.log("at this place.", Before.sr)
def _extract_trigger_set(sr, Keyword, Pattern):
if Pattern is None:
return None
elif isinstance(Pattern, NumberSet):
return Pattern
def check_can_be_matched_by_single_character(SM):
bad_f = False
init_state = SM.get_init_state()
if SM.get_init_state().is_acceptance():
bad_f = True
elif len(SM.states) != 2:
bad_f = True
# Init state MUST transit to second state. Second state MUST not have any transitions
elif len(init_state.target_map.get_target_state_index_list()) != 1:
bad_f = True
else:
tmp = set(SM.states.keys())
tmp.remove(SM.init_state_index)
other_state_index = next(iter(tmp))
if len(SM.states[other_state_index].target_map.get_target_state_index_list()) != 0:
bad_f = True
if bad_f:
error.log("For '%s' only patterns are addmissible which\n" % Keyword + \
"can be matched by a single character, e.g. \" \" or [a-z].", sr)
sm = Pattern.extract_sm()
check_can_be_matched_by_single_character(sm)
transition_map = sm.get_init_state().target_map.get_map()
assert len(transition_map) == 1
return list(transition_map.values())[0]
def _check_grid_values_integer_multiples(CaMap):
grid_value_list = []
min_info = None
for character_set, info in CaMap:
if info.cc_type == E_CharacterCountType.COLUMN:
return
elif info.cc_type != E_CharacterCountType.GRID:
continue
        elif isinstance(info.value, str):
# If there is one single 'variable' grid value,
# then no assumptions can be made.
return
grid_value_list.append(info.value)
if min_info is None or info.value < min_info.value:
min_info = info
if min_info is None:
return
# Are all grid values a multiple of the minimum?
if all(x % min_info.value == 0 for x in grid_value_list):
error.warning("Setup does not contain spaces, only grids (tabulators). All grid\n" \
"widths are multiples of %i. The grid setup %s is equivalent to\n" \
% (min_info.value, repr(sorted(grid_value_list))[1:-1]) + \
"a setup with space counts %s. Space counts are faster to compute.\n" \
                      % repr([x // min_info.value for x in sorted(grid_value_list)])[1:-1],
min_info.sr)
return
def check_defined(CaMap, SourceReference, CCT):
for character_set, info in CaMap:
if info.cc_type == CCT:
return
error.warning("Setup does not define '%s'." % cc_type_name_db[CCT], SourceReference,
SuppressCode=NotificationDB.warning_counter_setup_without_newline)
| true
| true
|
7907242b23cf204f4c037253cd0304a004a2efb1
| 6,371
|
py
|
Python
|
contrib/discodex/lib/discodex/models.py
|
kostis/disco
|
200ca4afef9851139b122928e409d1d3186be646
|
[
"BSD-3-Clause"
] | 1
|
2016-08-23T06:45:18.000Z
|
2016-08-23T06:45:18.000Z
|
contrib/discodex/lib/discodex/models.py
|
dimazest/disco
|
9175f863d6f83f2a918c851c9eed88019adf7f24
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/discodex/lib/discodex/models.py
|
dimazest/disco
|
9175f863d6f83f2a918c851c9eed88019adf7f24
|
[
"BSD-3-Clause"
] | null | null | null |
import errno, os
from django.db import models
from django.http import Http404, HttpResponseServerError
from discodex.restapi.resource import Resource, Collection
from discodex.restapi.resource import (HttpResponseAccepted,
HttpResponseCreated,
HttpResponseNoContent,
HttpResponseServiceUnavailable)
from discodex import settings
from discodex.mapreduce import (Indexer,
DiscoDBIterator)
from discodex.objects import (DataSet,
IChunks,
Indices,
Index,
Results,
Dict)
from disco.core import Disco
from disco.ddfs import DDFS
from disco.error import DiscoError
from disco.util import flatten, parse_dir
discodex_settings = settings.DiscodexSettings()
disco_master_url = discodex_settings['DISCODEX_DISCO_MASTER']
disco_prefix = discodex_settings['DISCODEX_DISCO_PREFIX']
index_prefix = discodex_settings['DISCODEX_INDEX_PREFIX']
purge_file = discodex_settings['DISCODEX_PURGE_FILE']
disco_master = Disco(disco_master_url)
ddfs = DDFS(disco_master_url)
NOT_FOUND, OK, ACTIVE, DEAD = 'unknown job', 'ready', 'active', 'dead'
class IndexCollection(Collection):
allowed_methods = ('GET', 'POST')
def delegate(self, request, *args, **kwargs):
name = str(kwargs.pop('name'))
return IndexResource(name)(request, *args, **kwargs)
@property
def names(self):
return ddfs.list(index_prefix)
def __iter__(self):
for name in self.names:
yield IndexResource(name)
def create(self, request, *args, **kwargs):
dataset = DataSet.loads(request.raw_post_data)
prefix = '%s:discodb:' % disco_prefix
job = Indexer(disco_master, prefix, dataset)
try:
job.run()
except ImportError, e:
return HttpResponseServerError("Callable object not found: %s" % e)
except DiscoError, e:
return HttpResponseServerError("Failed to run indexing job: %s" % e)
return HttpResponseAccepted(job.name)
def read(self, request, *args, **kwargs):
return Indices(self.names).response(request)
class IndexResource(Collection):
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def __init__(self, name):
self.name = name
self.responses['POST'] = 'append'
def delegate(self, request, *args, **kwargs):
if self.status == NOT_FOUND:
raise Http404
return DiscoDBResource(self)(request, *args, **kwargs)
@property
def exists(self):
return ddfs.exists(self.tag)
@property
def isdisco(self):
return self.name.startswith(disco_prefix)
@property
def isindex(self):
return self.name.startswith(index_prefix)
@property
def jobname(self):
if self.isdisco:
return self.name
if self.isindex:
return self.name.replace(index_prefix, disco_prefix, 1)
return '%s:%s' % (disco_prefix, self.name)
@property
def tag(self):
return self.jobname.replace(disco_prefix, index_prefix, 1)
@property
@models.permalink
def url(self):
return 'index', (), {'name': self.name}
@property
def ichunks(self):
return ddfs.blobs(self.tag)
@property
def status(self):
if self.exists:
return OK
if self.isdisco:
status, results = disco_master.results(self.name)
if status == OK:
_prefix, type, id = self.name.split(':', 2)
ddfs.put(self.tag, [[url.replace('disco://', '%s://' % type, 1)
for url in urls]
for urls in ddfs.blobs(results)])
disco_master.purge(self.jobname)
return status
return NOT_FOUND
def read(self, request, *args, **kwargs):
status = self.status
if status == OK:
return Index(ddfs.get(self.tag)).response(request)
if status == ACTIVE:
return HttpResponseServiceUnavailable(2)
if status == DEAD:
return HttpResponseServerError("Indexing failed.")
raise Http404
def append(self, request, *args, **kwargs):
ddfs.tag(self.tag, [['tag://%s' % IndexResource(request.raw_post_data).tag]])
return HttpResponseCreated(self.url)
def update(self, request, *args, **kwargs):
ddfs.put(self.tag, IChunks.loads(request.raw_post_data))
return HttpResponseCreated(self.url)
def delete(self, request, *args, **kwargs):
ddfs.delete(self.tag)
ddfs.delete(ddfs.job_tag(self.jobname))
return HttpResponseNoContent()
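# Illustration of the jobname/tag bookkeeping above, assuming the
# hypothetical prefixes disco_prefix = "discodex" and
# index_prefix = "discodex:index":
#
#     IndexResource("myindex").jobname  # -> "discodex:myindex"
#     IndexResource("myindex").tag      # -> "discodex:index:myindex"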
class DiscoDBResource(Resource):
allowed_methods = ('GET', 'POST')
def __init__(self, index):
self.index = index
def read(self, request, *args, **kwargs):
from discodex.mapreduce.func import reify
method = str(kwargs.pop('method', None) or '')
arg = str(kwargs.pop('arg', None) or '')
streams = [reify(s) for s in kwargs.pop('streams').split('|') if s]
reduce = reify((kwargs.pop('reduce') or 'None').strip('}'))
try:
job = DiscoDBIterator(disco_master,
disco_prefix,
self.index,
method,
arg,
streams,
reduce,
**dict(request.GET.items())).run()
except DiscoError, e:
return HttpResponseServerError("Failed to run DiscoDB job: %s" % e)
try:
results = Results(job.results)
except DiscoError, e:
return HttpResponseServerError("DiscoDB job failed: %s" % e)
finally:
if os.path.exists(purge_file):
disco_master.purge(job.name)
return results.response(request)
def create(self, request, *args, **kwargs):
kwargs.update(Dict.loads(request.raw_post_data))
return self.read(request, *args, **kwargs)
| 33.182292
| 85
| 0.578402
|
import errno, os
from django.db import models
from django.http import Http404, HttpResponseServerError
from discodex.restapi.resource import Resource, Collection
from discodex.restapi.resource import (HttpResponseAccepted,
HttpResponseCreated,
HttpResponseNoContent,
HttpResponseServiceUnavailable)
from discodex import settings
from discodex.mapreduce import (Indexer,
DiscoDBIterator)
from discodex.objects import (DataSet,
IChunks,
Indices,
Index,
Results,
Dict)
from disco.core import Disco
from disco.ddfs import DDFS
from disco.error import DiscoError
from disco.util import flatten, parse_dir
discodex_settings = settings.DiscodexSettings()
disco_master_url = discodex_settings['DISCODEX_DISCO_MASTER']
disco_prefix = discodex_settings['DISCODEX_DISCO_PREFIX']
index_prefix = discodex_settings['DISCODEX_INDEX_PREFIX']
purge_file = discodex_settings['DISCODEX_PURGE_FILE']
disco_master = Disco(disco_master_url)
ddfs = DDFS(disco_master_url)
NOT_FOUND, OK, ACTIVE, DEAD = 'unknown job', 'ready', 'active', 'dead'
class IndexCollection(Collection):
allowed_methods = ('GET', 'POST')
def delegate(self, request, *args, **kwargs):
name = str(kwargs.pop('name'))
return IndexResource(name)(request, *args, **kwargs)
@property
def names(self):
return ddfs.list(index_prefix)
def __iter__(self):
for name in self.names:
yield IndexResource(name)
def create(self, request, *args, **kwargs):
dataset = DataSet.loads(request.raw_post_data)
prefix = '%s:discodb:' % disco_prefix
job = Indexer(disco_master, prefix, dataset)
try:
job.run()
except ImportError, e:
return HttpResponseServerError("Callable object not found: %s" % e)
except DiscoError, e:
return HttpResponseServerError("Failed to run indexing job: %s" % e)
return HttpResponseAccepted(job.name)
def read(self, request, *args, **kwargs):
return Indices(self.names).response(request)
class IndexResource(Collection):
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def __init__(self, name):
self.name = name
self.responses['POST'] = 'append'
def delegate(self, request, *args, **kwargs):
if self.status == NOT_FOUND:
raise Http404
return DiscoDBResource(self)(request, *args, **kwargs)
@property
def exists(self):
return ddfs.exists(self.tag)
@property
def isdisco(self):
return self.name.startswith(disco_prefix)
@property
def isindex(self):
return self.name.startswith(index_prefix)
@property
def jobname(self):
if self.isdisco:
return self.name
if self.isindex:
return self.name.replace(index_prefix, disco_prefix, 1)
return '%s:%s' % (disco_prefix, self.name)
@property
def tag(self):
return self.jobname.replace(disco_prefix, index_prefix, 1)
@property
@models.permalink
def url(self):
return 'index', (), {'name': self.name}
@property
def ichunks(self):
return ddfs.blobs(self.tag)
@property
def status(self):
if self.exists:
return OK
if self.isdisco:
status, results = disco_master.results(self.name)
if status == OK:
_prefix, type, id = self.name.split(':', 2)
ddfs.put(self.tag, [[url.replace('disco://', '%s://' % type, 1)
for url in urls]
for urls in ddfs.blobs(results)])
disco_master.purge(self.jobname)
return status
return NOT_FOUND
def read(self, request, *args, **kwargs):
status = self.status
if status == OK:
return Index(ddfs.get(self.tag)).response(request)
if status == ACTIVE:
return HttpResponseServiceUnavailable(2)
if status == DEAD:
return HttpResponseServerError("Indexing failed.")
raise Http404
def append(self, request, *args, **kwargs):
ddfs.tag(self.tag, [['tag://%s' % IndexResource(request.raw_post_data).tag]])
return HttpResponseCreated(self.url)
def update(self, request, *args, **kwargs):
ddfs.put(self.tag, IChunks.loads(request.raw_post_data))
return HttpResponseCreated(self.url)
def delete(self, request, *args, **kwargs):
ddfs.delete(self.tag)
ddfs.delete(ddfs.job_tag(self.jobname))
return HttpResponseNoContent()
class DiscoDBResource(Resource):
allowed_methods = ('GET', 'POST')
def __init__(self, index):
self.index = index
def read(self, request, *args, **kwargs):
from discodex.mapreduce.func import reify
method = str(kwargs.pop('method', None) or '')
arg = str(kwargs.pop('arg', None) or '')
streams = [reify(s) for s in kwargs.pop('streams').split('|') if s]
reduce = reify((kwargs.pop('reduce') or 'None').strip('}'))
try:
job = DiscoDBIterator(disco_master,
disco_prefix,
self.index,
method,
arg,
streams,
reduce,
**dict(request.GET.items())).run()
except DiscoError, e:
return HttpResponseServerError("Failed to run DiscoDB job: %s" % e)
try:
results = Results(job.results)
except DiscoError, e:
return HttpResponseServerError("DiscoDB job failed: %s" % e)
finally:
if os.path.exists(purge_file):
disco_master.purge(job.name)
return results.response(request)
def create(self, request, *args, **kwargs):
kwargs.update(Dict.loads(request.raw_post_data))
return self.read(request, *args, **kwargs)
| false
| true
|
7907243674e9e866161964f1907b28118b6c5588
| 7,238
|
py
|
Python
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 95
|
2018-08-20T23:10:00.000Z
|
2022-02-17T02:54:32.000Z
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 220
|
2018-08-01T20:56:29.000Z
|
2022-03-28T18:12:35.000Z
|
test/functional/test_f_xcompat.py
|
farleyb-amazon/aws-encryption-sdk-python
|
7950abd73ee333407d2dadd02ef2d57c3df464cf
|
[
"Apache-2.0"
] | 63
|
2018-08-01T19:37:33.000Z
|
2022-03-20T17:14:15.000Z
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Functional test suite testing decryption of known good test files encrypted using static RawMasterKeyProvider."""
import base64
import json
import logging
import os
import sys
from collections import defaultdict
import attr
import pytest
import six
import aws_encryption_sdk
from aws_encryption_sdk.exceptions import InvalidKeyIdError
from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm
from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey
from aws_encryption_sdk.internal.str_ops import to_bytes
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
pytestmark = [pytest.mark.accept]
# Environment-specific test file locator. May not always exist.
def _file_root():
return "."
try:
from .aws_test_file_finder import file_root
except ImportError:
file_root = _file_root
_LOGGER = logging.getLogger()
_WRAPPING_ALGORITHM_MAP = {
b"AES": {
128: {b"": {b"": WrappingAlgorithm.AES_128_GCM_IV12_TAG16_NO_PADDING}},
192: {b"": {b"": WrappingAlgorithm.AES_192_GCM_IV12_TAG16_NO_PADDING}},
256: {b"": {b"": WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING}},
},
b"RSA": defaultdict(
lambda: {
b"PKCS1": {b"": WrappingAlgorithm.RSA_PKCS1},
b"OAEP-MGF1": {
b"SHA-1": WrappingAlgorithm.RSA_OAEP_SHA1_MGF1,
b"SHA-256": WrappingAlgorithm.RSA_OAEP_SHA256_MGF1,
b"SHA-384": WrappingAlgorithm.RSA_OAEP_SHA384_MGF1,
b"SHA-512": WrappingAlgorithm.RSA_OAEP_SHA512_MGF1,
},
}
),
}
_KEY_TYPES_MAP = {b"AES": EncryptionKeyType.SYMMETRIC, b"RSA": EncryptionKeyType.PRIVATE}
_STATIC_KEYS = defaultdict(dict)
class StaticStoredMasterKeyProvider(RawMasterKeyProvider):
"""Provides static key"""
provider_id = "static-aws-xcompat"
def _get_raw_key(self, key_id):
"""Finds a loaded raw key."""
try:
algorithm, key_bits, padding_algorithm, padding_hash = key_id.upper().split(b".", 3)
key_bits = int(key_bits)
key_type = _KEY_TYPES_MAP[algorithm]
wrapping_algorithm = _WRAPPING_ALGORITHM_MAP[algorithm][key_bits][padding_algorithm][padding_hash]
static_key = _STATIC_KEYS[algorithm][key_bits]
return WrappingKey(
wrapping_algorithm=wrapping_algorithm, wrapping_key=static_key, wrapping_key_type=key_type
)
except KeyError:
_LOGGER.exception("Unknown Key ID: %s", key_id)
raise InvalidKeyIdError("Unknown Key ID: {}".format(key_id))
@attr.s
class RawKeyDescription(object):
"""Customer raw key descriptor used by StaticStoredMasterKeyProvider."""
encryption_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_bits = attr.ib(validator=attr.validators.instance_of(int))
padding_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
padding_hash = attr.ib(validator=attr.validators.instance_of(six.string_types))
@property
def key_id(self):
"""Build a key ID from instance parameters."""
return ".".join([self.encryption_algorithm, str(self.key_bits), self.padding_algorithm, self.padding_hash])
@attr.s
class Scenario(object):
"""Scenario details."""
plaintext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
ciphertext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_ids = attr.ib(validator=attr.validators.instance_of(list))
def _generate_test_cases():  # noqa: C901
try:
root_dir = os.path.abspath(file_root())
except Exception: # pylint: disable=broad-except
root_dir = os.getcwd()
if not os.path.isdir(root_dir):
root_dir = os.getcwd()
base_dir = os.path.join(root_dir, "aws_encryption_sdk_resources")
ciphertext_manifest_path = os.path.join(base_dir, "manifests", "ciphertext.manifest")
if not os.path.isfile(ciphertext_manifest_path):
# Make no test cases if the ciphertext file is not found
return []
with open(ciphertext_manifest_path, encoding="utf-8") as f:
ciphertext_manifest = json.load(f)
_test_cases = []
# Collect keys from ciphertext manifest
for algorithm, keys in ciphertext_manifest["test_keys"].items():
algorithm = to_bytes(algorithm.upper())
for key_bits, key_desc in keys.items():
key_bits = int(key_bits)
raw_key = to_bytes(key_desc.get("line_separator", "").join(key_desc["key"]))
if key_desc["encoding"].lower() in ("raw", "pem"):
_STATIC_KEYS[algorithm][key_bits] = raw_key
elif key_desc["encoding"].lower() == "base64":
_STATIC_KEYS[algorithm][key_bits] = base64.b64decode(raw_key)
else:
raise Exception("TODO" + "Unknown key encoding")
# Collect test cases from ciphertext manifest
for test_case in ciphertext_manifest["test_cases"]:
key_ids = []
algorithm = aws_encryption_sdk.Algorithm.get_by_id(int(test_case["algorithm"], 16))
for key in test_case["master_keys"]:
sys.stderr.write("XC:: " + json.dumps(key) + "\n")
if key["provider_id"] == StaticStoredMasterKeyProvider.provider_id:
key_ids.append(
RawKeyDescription(
key["encryption_algorithm"],
key.get("key_bits", algorithm.data_key_len * 8),
key.get("padding_algorithm", ""),
key.get("padding_hash", ""),
).key_id
)
if key_ids:
_test_cases.append(
Scenario(
os.path.join(base_dir, test_case["plaintext"]["filename"]),
os.path.join(base_dir, test_case["ciphertext"]["filename"]),
key_ids,
)
)
return _test_cases
@pytest.mark.parametrize("scenario", _generate_test_cases())
def test_decrypt_from_file(scenario):
"""Tests decrypt from known good files."""
with open(scenario.ciphertext_filename, "rb") as infile:
ciphertext = infile.read()
with open(scenario.plaintext_filename, "rb") as infile:
plaintext = infile.read()
key_provider = StaticStoredMasterKeyProvider()
key_provider.add_master_keys_from_list(scenario.key_ids)
decrypted_ciphertext, _header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
assert decrypted_ciphertext == plaintext
| 39.336957
| 116
| 0.678088
|
import base64
import json
import logging
import os
import sys
from collections import defaultdict
import attr
import pytest
import six
import aws_encryption_sdk
from aws_encryption_sdk.exceptions import InvalidKeyIdError
from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm
from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey
from aws_encryption_sdk.internal.str_ops import to_bytes
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
pytestmark = [pytest.mark.accept]
def _file_root():
return "."
try:
from .aws_test_file_finder import file_root
except ImportError:
file_root = _file_root
_LOGGER = logging.getLogger()
_WRAPPING_ALGORITHM_MAP = {
b"AES": {
128: {b"": {b"": WrappingAlgorithm.AES_128_GCM_IV12_TAG16_NO_PADDING}},
192: {b"": {b"": WrappingAlgorithm.AES_192_GCM_IV12_TAG16_NO_PADDING}},
256: {b"": {b"": WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING}},
},
b"RSA": defaultdict(
lambda: {
b"PKCS1": {b"": WrappingAlgorithm.RSA_PKCS1},
b"OAEP-MGF1": {
b"SHA-1": WrappingAlgorithm.RSA_OAEP_SHA1_MGF1,
b"SHA-256": WrappingAlgorithm.RSA_OAEP_SHA256_MGF1,
b"SHA-384": WrappingAlgorithm.RSA_OAEP_SHA384_MGF1,
b"SHA-512": WrappingAlgorithm.RSA_OAEP_SHA512_MGF1,
},
}
),
}
_KEY_TYPES_MAP = {b"AES": EncryptionKeyType.SYMMETRIC, b"RSA": EncryptionKeyType.PRIVATE}
_STATIC_KEYS = defaultdict(dict)
class StaticStoredMasterKeyProvider(RawMasterKeyProvider):
provider_id = "static-aws-xcompat"
def _get_raw_key(self, key_id):
try:
algorithm, key_bits, padding_algorithm, padding_hash = key_id.upper().split(b".", 3)
key_bits = int(key_bits)
key_type = _KEY_TYPES_MAP[algorithm]
wrapping_algorithm = _WRAPPING_ALGORITHM_MAP[algorithm][key_bits][padding_algorithm][padding_hash]
static_key = _STATIC_KEYS[algorithm][key_bits]
return WrappingKey(
wrapping_algorithm=wrapping_algorithm, wrapping_key=static_key, wrapping_key_type=key_type
)
except KeyError:
_LOGGER.exception("Unknown Key ID: %s", key_id)
raise InvalidKeyIdError("Unknown Key ID: {}".format(key_id))
@attr.s
class RawKeyDescription(object):
encryption_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_bits = attr.ib(validator=attr.validators.instance_of(int))
padding_algorithm = attr.ib(validator=attr.validators.instance_of(six.string_types))
padding_hash = attr.ib(validator=attr.validators.instance_of(six.string_types))
@property
def key_id(self):
return ".".join([self.encryption_algorithm, str(self.key_bits), self.padding_algorithm, self.padding_hash])
@attr.s
class Scenario(object):
plaintext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
ciphertext_filename = attr.ib(validator=attr.validators.instance_of(six.string_types))
key_ids = attr.ib(validator=attr.validators.instance_of(list))
def _generate_test_cases():
try:
root_dir = os.path.abspath(file_root())
except Exception:
root_dir = os.getcwd()
if not os.path.isdir(root_dir):
root_dir = os.getcwd()
base_dir = os.path.join(root_dir, "aws_encryption_sdk_resources")
ciphertext_manifest_path = os.path.join(base_dir, "manifests", "ciphertext.manifest")
if not os.path.isfile(ciphertext_manifest_path):
return []
with open(ciphertext_manifest_path, encoding="utf-8") as f:
ciphertext_manifest = json.load(f)
_test_cases = []
for algorithm, keys in ciphertext_manifest["test_keys"].items():
algorithm = to_bytes(algorithm.upper())
for key_bits, key_desc in keys.items():
key_bits = int(key_bits)
raw_key = to_bytes(key_desc.get("line_separator", "").join(key_desc["key"]))
if key_desc["encoding"].lower() in ("raw", "pem"):
_STATIC_KEYS[algorithm][key_bits] = raw_key
elif key_desc["encoding"].lower() == "base64":
_STATIC_KEYS[algorithm][key_bits] = base64.b64decode(raw_key)
else:
raise Exception("TODO" + "Unknown key encoding")
for test_case in ciphertext_manifest["test_cases"]:
key_ids = []
algorithm = aws_encryption_sdk.Algorithm.get_by_id(int(test_case["algorithm"], 16))
for key in test_case["master_keys"]:
sys.stderr.write("XC:: " + json.dumps(key) + "\n")
if key["provider_id"] == StaticStoredMasterKeyProvider.provider_id:
key_ids.append(
RawKeyDescription(
key["encryption_algorithm"],
key.get("key_bits", algorithm.data_key_len * 8),
key.get("padding_algorithm", ""),
key.get("padding_hash", ""),
).key_id
)
if key_ids:
_test_cases.append(
Scenario(
os.path.join(base_dir, test_case["plaintext"]["filename"]),
os.path.join(base_dir, test_case["ciphertext"]["filename"]),
key_ids,
)
)
return _test_cases
@pytest.mark.parametrize("scenario", _generate_test_cases())
def test_decrypt_from_file(scenario):
with open(scenario.ciphertext_filename, "rb") as infile:
ciphertext = infile.read()
with open(scenario.plaintext_filename, "rb") as infile:
plaintext = infile.read()
key_provider = StaticStoredMasterKeyProvider()
key_provider.add_master_keys_from_list(scenario.key_ids)
decrypted_ciphertext, _header = aws_encryption_sdk.decrypt(source=ciphertext, key_provider=key_provider)
assert decrypted_ciphertext == plaintext
| true
| true
|
79072489b95e13f5dbe0e9f2641b90061a79a26f
| 894
|
py
|
Python
|
home_app/views.py
|
xjati46/agoraschool
|
98e9c6510f50a9ee87b5a6e3627466d244f7a617
|
[
"MIT"
] | null | null | null |
home_app/views.py
|
xjati46/agoraschool
|
98e9c6510f50a9ee87b5a6e3627466d244f7a617
|
[
"MIT"
] | null | null | null |
home_app/views.py
|
xjati46/agoraschool
|
98e9c6510f50a9ee87b5a6e3627466d244f7a617
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView, CreateView, UpdateView
from django.urls import reverse_lazy
from home_app import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from account_app.models import CustomUser
# Create your views here.
class IndexView(TemplateView):
template_name = 'home_app/index.html'
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'home_app/profile.html'
class RegistrationView(CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy('home-app:index')
template_name = 'registration/registration.html'
class UserUpdateView(UpdateView):
form_class = forms.UserUpdateForm
success_url = reverse_lazy('home-app:profile')
template_name = 'registration/registration_form.html'
model = CustomUser
class Page403View(TemplateView):
template_name = 'home_app/403.html'
| 27.090909
| 69
| 0.787472
|
from django.views.generic import TemplateView, CreateView, UpdateView
from django.urls import reverse_lazy
from home_app import forms
from django.contrib.auth.mixins import LoginRequiredMixin
from account_app.models import CustomUser
class IndexView(TemplateView):
template_name = 'home_app/index.html'
class ProfileView(LoginRequiredMixin, TemplateView):
template_name = 'home_app/profile.html'
class RegistrationView(CreateView):
form_class = forms.UserCreateForm
success_url = reverse_lazy('home-app:index')
template_name = 'registration/registration.html'
class UserUpdateView(UpdateView):
form_class = forms.UserUpdateForm
success_url = reverse_lazy('home-app:profile')
template_name = 'registration/registration_form.html'
model = CustomUser
class Page403View(TemplateView):
template_name = 'home_app/403.html'
| true
| true
|
7907253ab43db2fa4c6358f03b0c7aa789a281fb
| 1,290
|
py
|
Python
|
messages.py
|
Cedric0303/Vaccination-Notifier
|
167d3acfb35a904bbf2b1f49451c2cb32a606c96
|
[
"MIT"
] | 2
|
2021-07-02T05:03:34.000Z
|
2021-07-06T10:32:24.000Z
|
messages.py
|
Cedric0303/Vaccination-Notifier
|
167d3acfb35a904bbf2b1f49451c2cb32a606c96
|
[
"MIT"
] | null | null | null |
messages.py
|
Cedric0303/Vaccination-Notifier
|
167d3acfb35a904bbf2b1f49451c2cb32a606c96
|
[
"MIT"
] | null | null | null |
"""
strings and logic related to composing notifications
"""
HELLO_STATUS = "Hello! I'm Vaccination Notifier"
HELLO_MESSAGE = (
"Hello there!\n"
"\n"
"I'm Vaccination Notifier. This is just a message to let you know I'm running and "
"to test our notification configuration. I'll check for changes to your "
"vaccination status once every {delay} minutes---unless I crash! Every now and then, "
"you should probably check on me to make sure nothing has gone wrong.\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def hello_message(delay):
return (HELLO_STATUS, HELLO_MESSAGE.format(delay=delay))
UPDATE_STATUS = "Vaccination update detected"
UPDATE_MESSAGE = (
"Hello there!\n"
"\n"
"I noticed that your vaccination results page was updated recently. Here's "
"a summary of the update:\n"
"Health Facility:{facility}\n"
"Vaccination Location:{location}\n"
"Date:{date}\n"
"Time:{time}\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def update_message(info):
    facility = info['Health Facility:']
    location = info['Vaccination Location:']
    date = info['Date:']
    time = info['Time:']
return (UPDATE_STATUS,
UPDATE_MESSAGE.format(facility=facility, location=location, date=date, time=time))
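
if __name__ == "__main__":
    # Minimal smoke test for the composition logic above; the values in
    # this dict are hypothetical.
    demo = {
        'Health Facility:': 'Example Clinic',
        'Vaccination Location:': 'Hall 3',
        'Date:': '2021-07-01',
        'Time:': '09:30',
    }
    status, body = update_message(demo)
    print(status)
    print(body)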
| 30.714286
| 90
| 0.675969
|
HELLO_STATUS = "Hello! I'm Vaccination Notifier"
HELLO_MESSAGE = (
"Hello there!\n"
"\n"
"I'm Vaccination Notifier. This is just a message to let you know I'm running and "
"to test our notification configuration. I'll check for changes to your "
"vaccination status once every {delay} minutes---unless I crash! Every now and then, "
"you should probably check on me to make sure nothing has gone wrong.\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def hello_message(delay):
return (HELLO_STATUS, HELLO_MESSAGE.format(delay=delay))
UPDATE_STATUS = "Vaccination update detected"
UPDATE_MESSAGE = (
"Hello there!\n"
"\n"
"I noticed that your vaccination results page was updated recently. Here's "
"a summary of the update:\n"
"Health Facility:{facility}\n"
"Vaccination Location:{location}\n"
"Date:{date}\n"
"Time:{time}\n"
"\n"
"Love,\n"
"Vaccination Notifier"
)
def update_message(info):
    facility = info['Health Facility:']
    location = info['Vaccination Location:']
    date = info['Date:']
    time = info['Time:']
return (UPDATE_STATUS,
UPDATE_MESSAGE.format(facility=facility, location=location, date=date, time=time))
| true
| true
|
79072576249906be7c00308ba8ececc40ddbf15a
| 1,923
|
py
|
Python
|
src/quantum/azext_quantum/vendored_sdks/azure_mgmt_quantum/models/target_description_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/quantum/azext_quantum/vendored_sdks/azure_mgmt_quantum/models/target_description_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 3
|
2020-05-27T20:16:26.000Z
|
2020-07-23T19:46:49.000Z
|
src/quantum/azext_quantum/vendored_sdks/azure_mgmt_quantum/models/target_description_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 5
|
2020-05-09T17:47:09.000Z
|
2020-10-01T19:52:06.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TargetDescription(Model):
"""Information about a Target. A target is the component that can process a
specific type of Job.
:param id: Unique target id.
:type id: str
:param name: Display name of this target.
:type name: str
:param description: A description about this target.
:type description: str
:param accepted_data_formats: List of data formats accepted by this
target.
:type accepted_data_formats: list[str]
:param accepted_content_encodings: List of content encodings accepted by
this target.
:type accepted_content_encodings: list[str]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'accepted_data_formats': {'key': 'acceptedDataFormats', 'type': '[str]'},
'accepted_content_encodings': {'key': 'acceptedContentEncodings', 'type': '[str]'},
}
def __init__(self, *, id: str=None, name: str=None, description: str=None, accepted_data_formats=None, accepted_content_encodings=None, **kwargs) -> None:
super(TargetDescription, self).__init__(**kwargs)
self.id = id
self.name = name
self.description = description
self.accepted_data_formats = accepted_data_formats
self.accepted_content_encodings = accepted_content_encodings
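
if __name__ == '__main__':
    # Minimal usage sketch; every field value below is hypothetical.
    example = TargetDescription(
        id='example.simulator',
        name='Example simulator',
        description='A sample target that accepts one data format.',
        accepted_data_formats=['example-format.v1'],
        accepted_content_encodings=['gzip'],
    )
    print(example.serialize())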
| 40.0625
| 158
| 0.632345
|
from msrest.serialization import Model
class TargetDescription(Model):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'accepted_data_formats': {'key': 'acceptedDataFormats', 'type': '[str]'},
'accepted_content_encodings': {'key': 'acceptedContentEncodings', 'type': '[str]'},
}
def __init__(self, *, id: str=None, name: str=None, description: str=None, accepted_data_formats=None, accepted_content_encodings=None, **kwargs) -> None:
super(TargetDescription, self).__init__(**kwargs)
self.id = id
self.name = name
self.description = description
self.accepted_data_formats = accepted_data_formats
self.accepted_content_encodings = accepted_content_encodings
| true
| true
|
7907258972bbefc6cc9463d808def2868ecddece
| 4,207
|
py
|
Python
|
src/csi_rover_controls/deprecated/simple_rover_controller.py
|
BhargavRE25/Rover-Machine-Learning
|
af48811ceb08acae1dda76473d294f362178dcbe
|
[
"MIT"
] | 3
|
2020-09-21T17:15:08.000Z
|
2020-09-25T01:08:19.000Z
|
src/csi_rover_controls/deprecated/simple_rover_controller.py
|
columbia-university-robotics/vehicle-machine-learning
|
af48811ceb08acae1dda76473d294f362178dcbe
|
[
"MIT"
] | null | null | null |
src/csi_rover_controls/deprecated/simple_rover_controller.py
|
columbia-university-robotics/vehicle-machine-learning
|
af48811ceb08acae1dda76473d294f362178dcbe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import math
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class SimpleRoverController:
def __init__(self):
self.namespace = rospy.get_param("name_space", "scout_1")
self.w_s = rospy.get_param("wheel_separation", 1.7680) # wheel seperation
self.w_r = rospy.get_param("wheel_separation", 0.3048) # wheel radisu
if "/" in self.namespace:
rospy.logerr("[rover_motion_controller] invalid namespace. namespace can not contain /")
exit(1)
self.lf_steering_pub = rospy.Publisher("/" + self.namespace + "/fl_steering_arm_controller/command", Float64, queue_size=2)
self.rf_steering_pub = rospy.Publisher("/" + self.namespace + "/fr_steering_arm_controller/command", Float64, queue_size=2)
self.lr_steering_pub = rospy.Publisher("/" + self.namespace + "/bl_steering_arm_controller/command", Float64, queue_size=2)
self.rr_steering_pub = rospy.Publisher("/" + self.namespace + "/br_steering_arm_controller/command", Float64, queue_size=2)
self.lf_axle_pub = rospy.Publisher("/" + self.namespace + "/fl_wheel_controller/command", Float64, queue_size=2)
self.rf_axle_pub = rospy.Publisher("/" + self.namespace + "/fr_wheel_controller/command", Float64, queue_size=2)
self.lr_axle_pub = rospy.Publisher("/" + self.namespace + "/bl_wheel_controller/command", Float64, queue_size=2)
self.rr_axle_pub = rospy.Publisher("/" + self.namespace + "/br_wheel_controller/command", Float64, queue_size=2)
self.steering_cmd = 0
self.linear_vel = 0
self.linear_x = 0
self.angular_z = 0
rospy.Subscriber("/csi_rover/cmd_vel", Twist, callback=self.directional_movement)
rospy.init_node('rover_motion_controller', anonymous=True)
        rate = rospy.Rate(30)  # 30 Hz
while not rospy.is_shutdown():
# check to see if there's an explicit yaw command
if self.angular_z != 0:
self.rf_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.rr_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.lf_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
self.lr_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
# lock all steering joints to be zero
self.synchronized_steering(0)
# else use crab steering
else:
self.lf_axle_pub.publish(self.linear_vel)
self.lr_axle_pub.publish(self.linear_vel)
self.rf_axle_pub.publish(self.linear_vel)
self.rr_axle_pub.publish(self.linear_vel)
self.synchronized_steering(self.steering_cmd)
rate.sleep()
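        # Worked example for the skid-steering lines above (hypothetical
        # inputs): with linear_x = 1.0 m/s, angular_z = 0.5 rad/s,
        # w_s = 1.768 m and w_r = 0.3048 m:
        #
        #     right wheels: (1.0 + 0.5 * 1.768 / 2.0) / 0.3048  # ~4.73 rad/s
        #     left wheels:  (1.0 - 0.5 * 1.768 / 2.0) / 0.3048  # ~1.83 rad/s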
# move all of the steering joints to a position.
# the parameter is an angle value in radians
def synchronized_steering(self, angle):
self.lf_steering_pub.publish(angle)
self.rf_steering_pub.publish(angle)
self.lr_steering_pub.publish(angle)
self.rr_steering_pub.publish(angle)
    # Determine the steering angle (range -pi/2 to pi/2) and set
    # linear_vel to the command magnitude; when angular.z is non-zero,
    # the main loop uses skid steering instead of crab steering.
def directional_movement(self, data):
# data comes in as ( x , y )
# https://answers.ros.org/question/29706/twist-message-example-and-cmd_vel/
# rospy.loginfo("Received a /cmd_vel message!")
# rospy.loginfo("Linear Components: [%f, %f, %f]"%(data.linear.x, data.linear.y, data.linear.z))
# rospy.loginfo("Angular Components: [%f, %f, %f]"%(data.angular.x, data.angular.y, data.angular.z))
theta = math.atan2(data.linear.x, data.linear.y)
self.steering_cmd = theta
self.linear_vel = math.sqrt(math.pow(data.linear.x, 2) + math.pow(data.linear.y, 2))
self.angular_z = data.angular.z
self.linear_x = data.linear.x
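# Worked example for the crab-steering math above (hypothetical command):
# linear.x = 1.0 and linear.y = 1.0 give
#
#     theta = math.atan2(1.0, 1.0)        # ~0.785 rad
#     speed = math.sqrt(1.0**2 + 1.0**2)  # ~1.414
#
# so all four wheels steer to the same ~0.785 rad angle and spin at the
# same rate.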
if __name__ == '__main__':
try:
SimpleRoverController()
    except rospy.ROSInterruptException:
pass
| 43.822917
| 131
| 0.651058
|
import rospy
import math
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
class SimpleRoverController:
def __init__(self):
self.namespace = rospy.get_param("name_space", "scout_1")
self.w_s = rospy.get_param("wheel_separation", 1.7680)
self.w_r = rospy.get_param("wheel_separation", 0.3048)
if "/" in self.namespace:
rospy.logerr("[rover_motion_controller] invalid namespace. namespace can not contain /")
exit(1)
self.lf_steering_pub = rospy.Publisher("/" + self.namespace + "/fl_steering_arm_controller/command", Float64, queue_size=2)
self.rf_steering_pub = rospy.Publisher("/" + self.namespace + "/fr_steering_arm_controller/command", Float64, queue_size=2)
self.lr_steering_pub = rospy.Publisher("/" + self.namespace + "/bl_steering_arm_controller/command", Float64, queue_size=2)
self.rr_steering_pub = rospy.Publisher("/" + self.namespace + "/br_steering_arm_controller/command", Float64, queue_size=2)
self.lf_axle_pub = rospy.Publisher("/" + self.namespace + "/fl_wheel_controller/command", Float64, queue_size=2)
self.rf_axle_pub = rospy.Publisher("/" + self.namespace + "/fr_wheel_controller/command", Float64, queue_size=2)
self.lr_axle_pub = rospy.Publisher("/" + self.namespace + "/bl_wheel_controller/command", Float64, queue_size=2)
self.rr_axle_pub = rospy.Publisher("/" + self.namespace + "/br_wheel_controller/command", Float64, queue_size=2)
self.steering_cmd = 0
self.linear_vel = 0
self.linear_x = 0
self.angular_z = 0
rospy.Subscriber("/csi_rover/cmd_vel", Twist, callback=self.directional_movement)
rospy.init_node('rover_motion_controller', anonymous=True)
rate = rospy.Rate(30)
while not rospy.is_shutdown():
if self.angular_z != 0:
self.rf_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.rr_axle_pub.publish((self.linear_x + self.angular_z * self.w_s / 2.0) / self.w_r)
self.lf_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
self.lr_axle_pub.publish((self.linear_x - self.angular_z * self.w_s / 2.0) / self.w_r)
# lock all steering joints to be zero
self.synchronized_steering(0)
# else use crab steering
else:
self.lf_axle_pub.publish(self.linear_vel)
self.lr_axle_pub.publish(self.linear_vel)
self.rf_axle_pub.publish(self.linear_vel)
self.rr_axle_pub.publish(self.linear_vel)
self.synchronized_steering(self.steering_cmd)
rate.sleep()
# move all of the steering joints to a position.
# the parameter is an angle value in radians
def synchronized_steering(self, angle):
self.lf_steering_pub.publish(angle)
self.rf_steering_pub.publish(angle)
self.lr_steering_pub.publish(angle)
self.rr_steering_pub.publish(angle)
    # Determine the steering angle (range -pi/2 to pi/2) and set
    # linear_vel to the command magnitude; when angular.z is non-zero,
    # the main loop uses skid steering instead of crab steering.
def directional_movement(self, data):
# data comes in as ( x , y )
# https://answers.ros.org/question/29706/twist-message-example-and-cmd_vel/
# rospy.loginfo("Received a /cmd_vel message!")
# rospy.loginfo("Linear Components: [%f, %f, %f]"%(data.linear.x, data.linear.y, data.linear.z))
# rospy.loginfo("Angular Components: [%f, %f, %f]"%(data.angular.x, data.angular.y, data.angular.z))
theta = math.atan2(data.linear.x, data.linear.y)
self.steering_cmd = theta
self.linear_vel = math.sqrt(math.pow(data.linear.x, 2) + math.pow(data.linear.y, 2))
self.angular_z = data.angular.z
self.linear_x = data.linear.x
if __name__ == '__main__':
try:
SimpleRoverController()
except rospy.ROSInterruptExoception:
pass
| true
| true
|
7907258f4a7819efea66d391d5c390958abf5e17
| 598
|
py
|
Python
|
server/djangoapp/admin.py
|
RafaelJon/agfzb-CloudAppDevelopment_Capstone
|
006ea1affddb409e5a43659a7e9adca479e2d104
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
RafaelJon/agfzb-CloudAppDevelopment_Capstone
|
006ea1affddb409e5a43659a7e9adca479e2d104
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
RafaelJon/agfzb-CloudAppDevelopment_Capstone
|
006ea1affddb409e5a43659a7e9adca479e2d104
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
# from .models import related models
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
model = CarModel.car_makes.through
extra = 3
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
list_display = ['name']
# CarMakeAdmin class with CarModelInline
class CarMakeAdmin(admin.ModelAdmin):
inlines = [CarModelInline]
list_display = ['name']
# Register models here
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
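# Note: CarModelInline edits the implicit "through" table of the
# CarModel.car_makes many-to-many field, so each CarMake change page in
# the admin lists its associated CarModel rows inline, plus three blank
# extra rows (extra = 3).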
| 26
| 44
| 0.77592
|
from django.contrib import admin
from .models import CarMake, CarModel
class CarModelInline(admin.StackedInline):
model = CarModel.car_makes.through
extra = 3
class CarModelAdmin(admin.ModelAdmin):
list_display = ['name']
class CarMakeAdmin(admin.ModelAdmin):
inlines = [CarModelInline]
list_display = ['name']
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
| true
| true
|
79072799f7f744d11592756ce43654976d9a7ea8
| 1,619
|
py
|
Python
|
tests/test_nexus.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 108
|
2015-08-21T10:39:22.000Z
|
2022-03-04T22:10:49.000Z
|
tests/test_nexus.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 60
|
2015-07-21T22:55:52.000Z
|
2022-03-24T21:20:00.000Z
|
tests/test_nexus.py
|
ghuls/weblogo
|
7eab5d1b8a8ec38786fa426af84bd77950835524
|
[
"MIT"
] | 40
|
2015-08-04T00:18:23.000Z
|
2021-12-30T13:41:54.000Z
|
#!/usr/bin/env python
import unittest
from weblogo.seq_io._nexus import Nexus
from . import data_stream
class test_nexus(unittest.TestCase):
def test_create(self):
n = Nexus()
self.assertNotEqual(n, None)
def test_parse_f0(self):
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
# self.output_basics(n)
expected = [
"t1",
"t2 the name",
"isn'that [a] strange name?",
"one should be punished, for (that)!",
"t5",
"t6",
"t7",
"t8",
"t9",
]
taxa = n.taxlabels
self.assertEqual(taxa, expected)
f.close()
def test_parse_protein(self):
f = data_stream("nexus/protein.nex")
Nexus(f)
f.close()
def test_parse_dna(self):
f = data_stream("nexus/dna.nex")
n = Nexus(f)
taxa = n.taxlabels
taxa.sort()
self.assertEqual(len(taxa), 10)
self.assertEqual(taxa[0], "Carp")
self.assertEqual(taxa[-1], "Whale")
f.close()
def test_TreeTest1(self):
"""Test Tree module."""
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
t3 = n.trees[2]
t3.root_with_outgroup(["t1", "t5"])
# Return node_id of common ancestor if
# taxon_list is monophyletic, -1 otherwise.
self.assertEqual(t3.is_monophyletic(["t1", "t5"]), 13)
t3.split(parent_id=t3.search_taxon("t9"))
f.close()
if __name__ == "__main__":
unittest.main()
| 23.463768
| 62
| 0.536751
|
import unittest
from weblogo.seq_io._nexus import Nexus
from . import data_stream
class test_nexus(unittest.TestCase):
def test_create(self):
n = Nexus()
self.assertNotEqual(n, None)
def test_parse_f0(self):
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
expected = [
"t1",
"t2 the name",
"isn'that [a] strange name?",
"one should be punished, for (that)!",
"t5",
"t6",
"t7",
"t8",
"t9",
]
taxa = n.taxlabels
self.assertEqual(taxa, expected)
f.close()
def test_parse_protein(self):
f = data_stream("nexus/protein.nex")
Nexus(f)
f.close()
def test_parse_dna(self):
f = data_stream("nexus/dna.nex")
n = Nexus(f)
taxa = n.taxlabels
taxa.sort()
self.assertEqual(len(taxa), 10)
self.assertEqual(taxa[0], "Carp")
self.assertEqual(taxa[-1], "Whale")
f.close()
def test_TreeTest1(self):
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
        t3 = n.trees[2]
t3.root_with_outgroup(["t1", "t5"])
# Return node_id of common ancestor if
# taxon_list is monophyletic, -1 otherwise.
self.assertEqual(t3.is_monophyletic(["t1", "t5"]), 13)
t3.split(parent_id=t3.search_taxon("t9"))
f.close()
if __name__ == "__main__":
unittest.main()
| true
| true
|
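A usage sketch of the Nexus API exercised by the tests above, assuming a local .nex file is available; the taxa and node ids depend on the input.

from weblogo.seq_io._nexus import Nexus

with open("test_Nexus_input.nex") as handle:
    n = Nexus(handle)

print(n.taxlabels)                       # taxa declared in the file
tree = n.trees[2]
tree.root_with_outgroup(["t1", "t5"])
# is_monophyletic returns the common-ancestor node id, or -1 otherwise.
print(tree.is_monophyletic(["t1", "t5"]))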
7907286715e94bb49b19784d5b7f49124bdf474c
| 7,922
|
py
|
Python
|
docs/conf.py
|
metaist/pageit
|
11c2ade12d527c582585af482c285b9b38895861
|
[
"MIT"
] | 1
|
2015-06-29T11:44:45.000Z
|
2015-06-29T11:44:45.000Z
|
docs/conf.py
|
metaist/pageit
|
11c2ade12d527c582585af482c285b9b38895861
|
[
"MIT"
] | 1
|
2015-02-24T18:07:21.000Z
|
2015-02-25T02:15:47.000Z
|
docs/conf.py
|
metaist/pageit
|
11c2ade12d527c582585af482c285b9b38895861
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding: utf-8
# This file is execfile()d with the current directory set to its containing
# dir. Note that not all possible configuration values are present in this
# autogenerated file. All configuration values have a default; values that are
# commented out serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join('..')))
import pageit # noqa
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest', 'sphinx.ext.autodoc',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pageit'
copyright = u'2013, Metaist'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pageit.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pageitdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pageit.tex', u'pageit Documentation',
u'The Metaist', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pageit', u'pageit Documentation',
[u'The Metaist'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pageit', u'pageit Documentation',
u'The Metaist', 'pageit', pageit.__doc__.split('\n')[0],
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.072874
| 79
| 0.709038
|
import sys
import os
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(os.path.join('..')))
import pageit
extensions = ['sphinx.ext.doctest', 'sphinx.ext.autodoc',
'sphinxcontrib.napoleon']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'pageit'
copyright = u'2013, Metaist'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pageit.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pageitdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pageit.tex', u'pageit Documentation',
u'The Metaist', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pageit', u'pageit Documentation',
[u'The Metaist'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pageit', u'pageit Documentation',
u'The Metaist', 'pageit', pageit.__doc__.split('\n')[0],
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true
| true
|
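A hypothetical minimal pageit/__init__.py satisfying what conf.py reads: a module docstring whose first line feeds the Texinfo description, and a __version__ string used for both `version` and `release`. Both values below are illustrative only.

"""Pageit: a static site generation tool."""  # first line feeds texinfo_documents

__version__ = "0.1.0"  # the real value ships with the package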
79072a399bbddc97922d302380458fcac9f3431b
| 5,076
|
py
|
Python
|
chart-generation/charts/vaccines.py
|
maldins46/CovidTracker
|
6a50e780935de62e07c691fae2363c290aae5795
|
[
"MIT"
] | null | null | null |
chart-generation/charts/vaccines.py
|
maldins46/CovidTracker
|
6a50e780935de62e07c691fae2363c290aae5795
|
[
"MIT"
] | 13
|
2020-11-04T22:39:55.000Z
|
2022-03-02T10:27:45.000Z
|
chart-generation/charts/vaccines.py
|
maldins46/CovidTracker
|
6a50e780935de62e07c691fae2363c290aae5795
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Charts about the national vaccines data.
@author: riccardomaldini
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from data_extractors.vaccines_regions import benchmark_dict, marche_df
from data_extractors.vaccines_italy import italy_df
from data_extractors.area_names import area_names_dict
from matplotlib.dates import MonthLocator
import utils
def adm_doses_italy(save_image=False, show=False):
"""
Administration data about Italy.
"""
# plt.stackplot(data['data_somministrazione'], data['prima_dose'],data['seconda_dose'],
# labels=['Prime dosi', 'Seconde dosi'])
plt.bar(italy_df['data_somministrazione'], italy_df['prima_dose'], label='Prime dosi')
plt.bar(italy_df['data_somministrazione'], italy_df['seconda_dose'], bottom=italy_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Italia,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_italia.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def adm_doses_marche(save_image=False, show=False):
"""
    Administration data about the Marche region.
"""
plt.bar(marche_df['data_somministrazione'], marche_df['prima_dose'], label='Prime dosi')
plt.bar(marche_df['data_somministrazione'], marche_df['seconda_dose'], bottom=marche_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Marche,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_marche.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def regional_doses(save_image=False, show=False):
"""
    Comparison of the doses administered across the benchmark regions
"""
for area_code, region_data in benchmark_dict.items():
rolling_avg_adm = region_data['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(region_data['data_somministrazione'], rolling_avg_adm, label=area_names_dict[area_code])
rolling_avg_adm = italy_df['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(italy_df['data_somministrazione'], rolling_avg_adm, alpha=0.5, linestyle=':',
label="Italia")
plt.title('Andamento delle somministrazioni giornaliere\nper 100.000 abitanti, confronto tra le regioni del benchmark\n')
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_per_regioni.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def immunes_percentage(save_image=False, show=False):
"""
    Computes and plots the share of each area's population that has received the second dose.
"""
for area_code, region_data in benchmark_dict.items():
plt.plot(region_data['data_somministrazione'], region_data['seconda_dose_totale_storico_su_pop'],
label=area_names_dict[area_code])
plt.plot(italy_df['data_somministrazione'], italy_df['seconda_dose_totale_storico_su_pop'], alpha=0.5, linestyle=':',
label="Italia")
plt.title('Percentuale popolazione immunizzata,\nconfronto tra le regioni del benchmark\n')
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1))
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/immunizzati.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
| 38.165414
| 125
| 0.718676
|
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from data_extractors.vaccines_regions import benchmark_dict, marche_df
from data_extractors.vaccines_italy import italy_df
from data_extractors.area_names import area_names_dict
from matplotlib.dates import MonthLocator
import utils
def adm_doses_italy(save_image=False, show=False):
plt.bar(italy_df['data_somministrazione'], italy_df['prima_dose'], label='Prime dosi')
plt.bar(italy_df['data_somministrazione'], italy_df['seconda_dose'], bottom=italy_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Italia,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_italia.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def adm_doses_marche(save_image=False, show=False):
plt.bar(marche_df['data_somministrazione'], marche_df['prima_dose'], label='Prime dosi')
plt.bar(marche_df['data_somministrazione'], marche_df['seconda_dose'], bottom=marche_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Marche,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_marche.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def regional_doses(save_image=False, show=False):
for area_code, region_data in benchmark_dict.items():
rolling_avg_adm = region_data['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(region_data['data_somministrazione'], rolling_avg_adm, label=area_names_dict[area_code])
rolling_avg_adm = italy_df['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(italy_df['data_somministrazione'], rolling_avg_adm, alpha=0.5, linestyle=':',
label="Italia")
plt.title('Andamento delle somministrazioni giornaliere\nper 100.000 abitanti, confronto tra le regioni del benchmark\n')
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_per_regioni.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def immunes_percentage(save_image=False, show=False):
for area_code, region_data in benchmark_dict.items():
plt.plot(region_data['data_somministrazione'], region_data['seconda_dose_totale_storico_su_pop'],
label=area_names_dict[area_code])
plt.plot(italy_df['data_somministrazione'], italy_df['seconda_dose_totale_storico_su_pop'], alpha=0.5, linestyle=':',
label="Italia")
plt.title('Percentuale popolazione immunizzata,\nconfronto tra le regioni del benchmark\n')
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1))
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/immunizzati.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
| true
| true
|
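A minimal driver sketch for the chart module above; the import path follows the repository layout, and since the functions save to relative ./charts/vaccines/ paths it assumes it runs from the chart-generation directory.

from charts import vaccines

vaccines.adm_doses_italy(save_image=True)
vaccines.adm_doses_marche(save_image=True)
vaccines.regional_doses(save_image=True)
vaccines.immunes_percentage(save_image=True)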
79072ab4b8bdee5c0b7cdf45787af5e634de5c1e
| 607
|
py
|
Python
|
setup.py
|
Annabelle-Brown/q2-autopepsirf
|
76fded20b4b7064885c2124e0e32895321b976c4
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Annabelle-Brown/q2-autopepsirf
|
76fded20b4b7064885c2124e0e32895321b976c4
|
[
"Apache-2.0"
] | 4
|
2022-01-18T22:50:00.000Z
|
2022-03-21T17:47:42.000Z
|
setup.py
|
Annabelle-Brown/q2-autopepsirf
|
76fded20b4b7064885c2124e0e32895321b976c4
|
[
"Apache-2.0"
] | 1
|
2021-11-18T22:38:31.000Z
|
2021-11-18T22:38:31.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
setup(
name="q2-autopepsirf",
version=versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
package_data={},
author="Annabelle Brown",
author_email="annabelle811@live.com",
description="Auto-Run q2-pepsirf and q2-ps-plot",
license='Apache-2.0',
url="https://github.com/LadnerLab/q2-autopepsirf",
entry_points={
'qiime2.plugins': ['q2-autopepsirf=q2_autopepsirf.plugin_setup:plugin']
},
zip_safe=False,
)
| 28.904762
| 80
| 0.672158
|
from setuptools import setup, find_packages
import versioneer
setup(
name="q2-autopepsirf",
version=versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
package_data={},
author="Annabelle Brown",
author_email="annabelle811@live.com",
description="Auto-Run q2-pepsirf and q2-ps-plot",
license='Apache-2.0',
url="https://github.com/LadnerLab/q2-autopepsirf",
entry_points={
'qiime2.plugins': ['q2-autopepsirf=q2_autopepsirf.plugin_setup:plugin']
},
zip_safe=False,
)
| true
| true
|
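Once the package above is installed, its qiime2.plugins entry point can be discovered with the standard library alone; a sketch covering both the older and newer importlib.metadata APIs:

from importlib.metadata import entry_points

try:
    plugins = entry_points(group="qiime2.plugins")      # Python 3.10+
except TypeError:
    plugins = entry_points().get("qiime2.plugins", [])  # Python 3.8/3.9
for ep in plugins:
    print(ep.name, "->", ep.value)  # e.g. q2-autopepsirf -> q2_autopepsirf.plugin_setup:plugin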
79072afff678c079754fc12a9c38b39101e119b0
| 2,459
|
py
|
Python
|
freesas/__init__.py
|
kif/freesas
|
d4e468726e1c2486814ff07871d49dfadf77e437
|
[
"MIT"
] | 7
|
2015-06-30T13:13:43.000Z
|
2021-12-22T07:13:02.000Z
|
freesas/__init__.py
|
kif/freesas
|
d4e468726e1c2486814ff07871d49dfadf77e437
|
[
"MIT"
] | 47
|
2015-07-20T13:15:55.000Z
|
2022-03-27T07:51:38.000Z
|
freesas/__init__.py
|
kif/freesas
|
d4e468726e1c2486814ff07871d49dfadf77e437
|
[
"MIT"
] | 3
|
2015-04-30T07:41:49.000Z
|
2021-08-19T00:20:23.000Z
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
The silx package contains the following main sub-packages:
- silx.gui: Qt widgets for data visualization and data file browsing
- silx.image: Some processing functions for 2D images
- silx.io: Reading and writing data files (HDF5/NeXus, SPEC, ...)
- silx.math: Some processing functions for 1D, 2D, 3D, nD arrays
- silx.opencl: OpenCL-based data processing
- silx.sx: High-level silx functions suited for (I)Python console.
- silx.utils: Miscellaneous convenient functions
See silx documentation: http://www.silx.org/doc/silx/latest/
"""
__authors__ = ["Jérôme Kieffer"]
__license__ = "MIT"
__date__ = "31/08/2018"
import os as _os
import logging as _logging
_logging.getLogger(__name__).addHandler(_logging.NullHandler())
project = _os.path.basename(_os.path.dirname(_os.path.abspath(__file__)))
try:
from ._version import __date__ as date # noqa
from ._version import (
version,
version_info,
hexversion,
strictversion,
dated_version,
) # noqa
except ImportError:
raise RuntimeError(
"Do NOT use %s from its sources: build it and use the built version"
% project
)
| 37.830769
| 79
| 0.697031
| true
| true
|
|
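A hypothetical shape for the generated freesas/_version.py that the __init__ above imports from; the real values are produced at build time, and every value below is illustrative only.

version = "0.9.0"
version_info = (0, 9, 0, "final", 0)
hexversion = 0x000900f0
strictversion = "0.9.0"
dated_version = "0.9.0-20180831"
__date__ = "31/08/2018"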
79072b679ba04a231086536469f530efc9e5d1c6
| 7,817
|
py
|
Python
|
sdk/finbourne_insights/models/audit_process.py
|
finbourne/finbourne-insights-sdk-python
|
33ea49f0157def867405725013218d6f29cc2ee0
|
[
"MIT"
] | null | null | null |
sdk/finbourne_insights/models/audit_process.py
|
finbourne/finbourne-insights-sdk-python
|
33ea49f0157def867405725013218d6f29cc2ee0
|
[
"MIT"
] | null | null | null |
sdk/finbourne_insights/models/audit_process.py
|
finbourne/finbourne-insights-sdk-python
|
33ea49f0157def867405725013218d6f29cc2ee0
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
FINBOURNE Insights API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.0.238
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from finbourne_insights.configuration import Configuration
class AuditProcess(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'name': 'str',
'run_id': 'str',
'start_time': 'datetime',
'end_time': 'datetime',
'succeeded': 'bool'
}
attribute_map = {
'name': 'name',
'run_id': 'runId',
'start_time': 'startTime',
'end_time': 'endTime',
'succeeded': 'succeeded'
}
required_map = {
'name': 'required',
'run_id': 'required',
'start_time': 'required',
'end_time': 'optional',
'succeeded': 'optional'
}
def __init__(self, name=None, run_id=None, start_time=None, end_time=None, succeeded=None, local_vars_configuration=None): # noqa: E501
"""AuditProcess - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param run_id: (required)
:type run_id: str
:param start_time: (required)
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param succeeded:
:type succeeded: bool
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._run_id = None
self._start_time = None
self._end_time = None
self._succeeded = None
self.discriminator = None
self.name = name
self.run_id = run_id
self.start_time = start_time
self.end_time = end_time
self.succeeded = succeeded
@property
def name(self):
"""Gets the name of this AuditProcess. # noqa: E501
:return: The name of this AuditProcess. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AuditProcess.
:param name: The name of this AuditProcess. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) > 128):
raise ValueError("Invalid value for `name`, length must be less than or equal to `128`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 0):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
@property
def run_id(self):
"""Gets the run_id of this AuditProcess. # noqa: E501
:return: The run_id of this AuditProcess. # noqa: E501
:rtype: str
"""
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Sets the run_id of this AuditProcess.
:param run_id: The run_id of this AuditProcess. # noqa: E501
:type run_id: str
"""
if self.local_vars_configuration.client_side_validation and run_id is None: # noqa: E501
raise ValueError("Invalid value for `run_id`, must not be `None`") # noqa: E501
self._run_id = run_id
@property
def start_time(self):
"""Gets the start_time of this AuditProcess. # noqa: E501
:return: The start_time of this AuditProcess. # noqa: E501
:rtype: datetime
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this AuditProcess.
:param start_time: The start_time of this AuditProcess. # noqa: E501
:type start_time: datetime
"""
if self.local_vars_configuration.client_side_validation and start_time is None: # noqa: E501
raise ValueError("Invalid value for `start_time`, must not be `None`") # noqa: E501
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this AuditProcess. # noqa: E501
:return: The end_time of this AuditProcess. # noqa: E501
:rtype: datetime
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this AuditProcess.
:param end_time: The end_time of this AuditProcess. # noqa: E501
:type end_time: datetime
"""
self._end_time = end_time
@property
def succeeded(self):
"""Gets the succeeded of this AuditProcess. # noqa: E501
:return: The succeeded of this AuditProcess. # noqa: E501
:rtype: bool
"""
return self._succeeded
@succeeded.setter
def succeeded(self, succeeded):
"""Sets the succeeded of this AuditProcess.
:param succeeded: The succeeded of this AuditProcess. # noqa: E501
:type succeeded: bool
"""
self._succeeded = succeeded
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuditProcess):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AuditProcess):
return True
return self.to_dict() != other.to_dict()
| 29.277154
| 140
| 0.584751
|
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re
import six
from finbourne_insights.configuration import Configuration
class AuditProcess(object):
openapi_types = {
'name': 'str',
'run_id': 'str',
'start_time': 'datetime',
'end_time': 'datetime',
'succeeded': 'bool'
}
attribute_map = {
'name': 'name',
'run_id': 'runId',
'start_time': 'startTime',
'end_time': 'endTime',
'succeeded': 'succeeded'
}
required_map = {
'name': 'required',
'run_id': 'required',
'start_time': 'required',
'end_time': 'optional',
'succeeded': 'optional'
}
def __init__(self, name=None, run_id=None, start_time=None, end_time=None, succeeded=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._run_id = None
self._start_time = None
self._end_time = None
self._succeeded = None
self.discriminator = None
self.name = name
self.run_id = run_id
self.start_time = start_time
self.end_time = end_time
self.succeeded = succeeded
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if self.local_vars_configuration.client_side_validation and name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) > 128):
raise ValueError("Invalid value for `name`, length must be less than or equal to `128`")
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 0):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`")
self._name = name
@property
def run_id(self):
return self._run_id
@run_id.setter
def run_id(self, run_id):
if self.local_vars_configuration.client_side_validation and run_id is None:
raise ValueError("Invalid value for `run_id`, must not be `None`")
self._run_id = run_id
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, start_time):
if self.local_vars_configuration.client_side_validation and start_time is None:
raise ValueError("Invalid value for `start_time`, must not be `None`")
self._start_time = start_time
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, end_time):
self._end_time = end_time
@property
def succeeded(self):
return self._succeeded
@succeeded.setter
def succeeded(self, succeeded):
self._succeeded = succeeded
def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AuditProcess):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, AuditProcess):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
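A usage sketch for the generated model above; all field values are illustrative.

import datetime
from finbourne_insights.models.audit_process import AuditProcess

proc = AuditProcess(
    name="nightly-load",
    run_id="run-0001",
    start_time=datetime.datetime(2021, 1, 1, 2, 0, 0),
    end_time=datetime.datetime(2021, 1, 1, 2, 5, 0),
    succeeded=True,
)
print(proc.to_dict())                 # keys use python attribute names
print(proc.to_dict(serialize=True))   # keys use JSON names, e.g. runId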
79072c12edc6880eee5bb281b85b99ff6406f425
| 3,130
|
py
|
Python
|
revolt/channel.py
|
XiehCanCode/revolt.py
|
0b14143610f544d73ba9dde02adedafc51d76228
|
[
"MIT"
] | null | null | null |
revolt/channel.py
|
XiehCanCode/revolt.py
|
0b14143610f544d73ba9dde02adedafc51d76228
|
[
"MIT"
] | null | null | null |
revolt/channel.py
|
XiehCanCode/revolt.py
|
0b14143610f544d73ba9dde02adedafc51d76228
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from .enums import ChannelType
from .messageable import Messageable
if TYPE_CHECKING:
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import Group as GroupDMChannelPayload
from .types import SavedMessages as SavedMessagesPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = ("Channel",)
class Channel:
"""Base class for all channels
Attributes
-----------
id: :class:`str`
The id of the channel
channel_type: ChannelType
The type of the channel
server: Optional[:class:`Server`]
The server the channel is part of
"""
__slots__ = ("state", "id", "channel_type", "server")
def __init__(self, data: ChannelPayload, state: State):
self.state = state
self.id = data["_id"]
self.channel_type = ChannelType(data["channel_type"])
self.server = None
class SavedMessageChannel(Channel, Messageable):
"""The Saved Message Channel"""
def __init__(self, data: SavedMessagesPayload, state: State):
super().__init__(data, state)
class DMChannel(Channel, Messageable):
"""A DM channel"""
def __init__(self, data: DMChannelPayload, state: State):
super().__init__(data, state)
class GroupDMChannel(Channel, Messageable):
__slots__ = ("recipients", "name", "owner")
"""A group DM channel"""
def __init__(self, data: GroupDMChannelPayload, state: State):
super().__init__(data, state)
self.recipients = cast(list[User], list(filter(bool, [state.get_user(user_id) for user_id in data["recipients"]])))
self.name = data["name"]
self.owner = state.get_user(data["owner"])
class TextChannel(Channel, Messageable):
__slots__ = ("name", "description", "last_message", "last_message_id")
"""A text channel"""
def __init__(self, data: TextChannelPayload, state: State):
super().__init__(data, state)
self.server = state.get_server(data["server"])
self.name = data["name"]
self.description = data.get("description")
last_message_id = data.get("last_message")
self.last_message = state.get_message(last_message_id)
self.last_message_id = last_message_id
class VoiceChannel(Channel):
"""A voice channel"""
def __init__(self, data: ChannelPayload, state: State):
super().__init__(data, state)
def channel_factory(data: ChannelPayload, state: State) -> Channel:
if data["channel_type"] == "SavedMessage":
return SavedMessageChannel(data, state)
elif data["channel_type"] == "DirectMessage":
return DMChannel(data, state)
elif data["channel_type"] == "Group":
return GroupDMChannel(data, state)
elif data["channel_type"] == "TextChannel":
return TextChannel(data, state)
elif data["channel_type"] == "VoiceChannel":
return VoiceChannel(data, state)
else:
        raise ValueError(f"Unknown channel type: {data['channel_type']}")
| 33.655914
| 123
| 0.66869
|
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from .enums import ChannelType
from .messageable import Messageable
if TYPE_CHECKING:
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import Group as GroupDMChannelPayload
from .types import SavedMessages as SavedMessagesPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = ("Channel",)
class Channel:
__slots__ = ("state", "id", "channel_type", "server")
def __init__(self, data: ChannelPayload, state: State):
self.state = state
self.id = data["_id"]
self.channel_type = ChannelType(data["channel_type"])
self.server = None
class SavedMessageChannel(Channel, Messageable):
def __init__(self, data: SavedMessagesPayload, state: State):
super().__init__(data, state)
class DMChannel(Channel, Messageable):
def __init__(self, data: DMChannelPayload, state: State):
super().__init__(data, state)
class GroupDMChannel(Channel, Messageable):
__slots__ = ("recipients", "name", "owner")
def __init__(self, data: GroupDMChannelPayload, state: State):
super().__init__(data, state)
self.recipients = cast(list[User], list(filter(bool, [state.get_user(user_id) for user_id in data["recipients"]])))
self.name = data["name"]
self.owner = state.get_user(data["owner"])
class TextChannel(Channel, Messageable):
__slots__ = ("name", "description", "last_message", "last_message_id")
def __init__(self, data: TextChannelPayload, state: State):
super().__init__(data, state)
self.server = state.get_server(data["server"])
self.name = data["name"]
self.description = data.get("description")
last_message_id = data.get("last_message")
self.last_message = state.get_message(last_message_id)
self.last_message_id = last_message_id
class VoiceChannel(Channel):
def __init__(self, data: ChannelPayload, state: State):
super().__init__(data, state)
def channel_factory(data: ChannelPayload, state: State) -> Channel:
if data["channel_type"] == "SavedMessage":
return SavedMessageChannel(data, state)
elif data["channel_type"] == "DirectMessage":
return DMChannel(data, state)
elif data["channel_type"] == "Group":
return GroupDMChannel(data, state)
elif data["channel_type"] == "TextChannel":
return TextChannel(data, state)
elif data["channel_type"] == "VoiceChannel":
return VoiceChannel(data, state)
else:
        raise ValueError(f"Unknown channel type: {data['channel_type']}")
| true
| true
|
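A dispatch sketch for channel_factory above; the payload is illustrative, state=None works here only because VoiceChannel merely stores the reference (real code supplies the client's State), and it assumes ChannelType defines the same string values the factory branches on.

payload = {"_id": "01ABC", "channel_type": "VoiceChannel"}
channel = channel_factory(payload, state=None)
print(type(channel).__name__)  # -> VoiceChannel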
79072c9ca36518bd1ff26768bbdaf965cafced64
| 224
|
py
|
Python
|
exercicios/Lista3/Q3.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista3/Q3.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista3/Q3.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
#Faça um algoritmo utilizando o comando while que mostra uma
#contagem regressiva na tela, iniciando em 10 e terminando
#em O. Mostrar uma mensagem “FIM!" após a contagem.
i=11
while(i!=0):
i-=1
print(i)
print("FIM")
| 28
| 60
| 0.71875
|
i=11
while(i!=0):
i-=1
print(i)
print("FIM")
| true
| true
|
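An equivalent countdown using a for loop over range, for comparison:

for i in range(10, -1, -1):
    print(i)
print("FIM!")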
79072ccd68791b7d45ddef5230c0f168cbea543a
| 38,620
|
py
|
Python
|
official/vision/beta/modeling/layers/detection_generator.py
|
SuwoongHeo/models
|
fc2d4b695d931f79e63d8069b6a04b2877a6553f
|
[
"Apache-2.0"
] | 2
|
2021-11-03T05:14:54.000Z
|
2021-11-09T11:56:14.000Z
|
official/vision/beta/modeling/layers/detection_generator.py
|
GangababuGB/models
|
10ef6bbe39bb5ac3d0e2755dc60b6843d39d395c
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/modeling/layers/detection_generator.py
|
GangababuGB/models
|
10ef6bbe39bb5ac3d0e2755dc60b6843d39d395c
|
[
"Apache-2.0"
] | 1
|
2021-10-03T08:34:26.000Z
|
2021-10-03T08:34:26.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions of generators to generate the final detections."""
import contextlib
from typing import List, Optional, Mapping
# Import libraries
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str,
tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections given the model outputs.
  The implementation unrolls the batch dimension and processes images one by
  one. It requires the batch dimension to be statically known and is TPU
  compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
`[batch_size, N, 1, 4]` for box predictions on all feature levels. The
N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
attributes: None or a dict of (attribute_name, attributes) pairs. Each
attributes is a `tf.Tensor` with shape
`[batch_size, N, num_classes, attribute_size]` or
`[batch_size, N, 1, attribute_size]` for attribute predictions on all
feature levels. The N is the number of total anchors on all levels. Can
be None if no attribute learning is required.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A scalar representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0 (which is default), we fall back to standard NMS.
Returns:
nms_boxes: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, 4]` representing top detected boxes in
`[y1, x1, y2, x2]`.
nms_scores: A `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing sorted confidence scores
for detected boxes. The values are between `[0, 1]`.
nms_classes: An `int` type `tf.Tensor` of shape
`[batch_size, max_num_detections]` representing classes for detected
boxes.
valid_detections: An `int` type `tf.Tensor` of shape `[batch_size]` only the
top `valid_detections` boxes are valid detections.
nms_attributes: None or a dict of (attribute_name, attributes). Each
attribute is a `float` type `tf.Tensor` of shape
`[batch_size, max_num_detections, attribute_size]` representing attribute
predictions for detected boxes. Can be an empty dict if no attribute
learning is required.
"""
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
nmsed_att_i) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={
att_name: att[i] for att_name, att in attributes.items()
} if attributes else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
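# Usage sketch (not part of the library): shapes expected by
# _generate_detections_v1 for a batch of 2 images with 3 classes.
#   boxes = tf.zeros([2, 1000, 3, 4])          # per-class box predictions
#   scores = tf.random.uniform([2, 1000, 3])   # raw class scores
#   (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
#    _) = _generate_detections_v1(boxes, scores, max_num_detections=100)
#   # nmsed_boxes: [2, 100, 4]; nmsed_scores/nmsed_classes: [2, 100];
#   # valid_detections: [2]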
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
"""Generates the final detections per image given the model outputs.
Args:
    boxes: A `tf.Tensor` with shape `[N, num_classes, 4]` or `[N, 1, 4]`, which
      stacks box predictions on all feature levels. The N is the number of
      total anchors on all levels.
scores: A `tf.Tensor` with shape `[N, num_classes]`, which stacks class
probability on all feature levels. The N is the number of total anchors on
all levels. The num_classes is the number of classes predicted by the
model. Note that the class_outputs here is the raw score.
attributes: If not None, a dict of `tf.Tensor`. Each value is in shape
`[N, num_classes, attribute_size]` or `[N, 1, attribute_size]` of
attribute predictions on all feature levels. The N is the number of total
anchors on all levels.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
If set to None, `tf.image.non_max_suppression_padded` is called instead.
Returns:
nms_boxes: A `float` tf.Tensor of shape `[max_num_detections, 4]`
representing top detected boxes in `[y1, x1, y2, x2]`.
nms_scores: A `float` tf.Tensor of shape `[max_num_detections]` representing
sorted confidence scores for detected boxes. The values are between [0,
1].
nms_classes: An `int` tf.Tensor of shape `[max_num_detections]` representing
classes for detected boxes.
valid_detections: An `int` tf.Tensor of shape [1] only the top
`valid_detections` boxes are valid detections.
nms_attributes: None or a dict. Each value is a `float` tf.Tensor of shape
`[max_num_detections, attribute_size]` representing attribute predictions
for detected boxes. Can be an empty dict if `attributes` is None.
"""
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
# Obtains pre_nms_top_k before running NMS.
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i,
nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0)
else:
(nmsed_indices_i,
nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
# Sets scores of invalid boxes to -1.
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
# Concats results from all classes and sort them.
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
indices)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
"""Selects top_k scores and indices for each class.
Args:
scores_in: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: A `tf.Tensor` with shape
`[batch_size, pre_nms_num_detections, num_classes]`.
"""
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[batch_size, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores,
[0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])
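# A minimal usage sketch for `_select_top_k_scores` (illustrative only; the
# toy shapes and the eager-mode assertions are assumptions). It shows that the
# per-class top-k selection keeps the class axis in the last dimension.
def _example_select_top_k_scores():
  scores = tf.random.uniform([2, 1000, 4])  # [batch, num_anchors, num_classes]
  top_scores, top_indices = _select_top_k_scores(scores, 100)
  # Both outputs are [batch, pre_nms_num_detections, num_classes].
  assert top_scores.shape == (2, 100, 4)
  assert top_indices.shape == (2, 100, 4)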
def _generate_detections_v2(boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100):
"""Generates the final detections given the model outputs.
This implementation unrolls classes dimension while using the tf.while_loop
to implement the batched NMS, so that it can be parallelized at the batch
dimension. It should give better performance comparing to v1 implementation.
It is TPU compatible.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_top_k: An `int` number of top candidate detections per class before
NMS.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
      `valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
# Selects top pre_nms_num scores and indices before NMS.
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
# Obtains pre_nms_top_k before running NMS.
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
      # Filters out boxes whose scores fall below the pre-NMS score threshold.
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
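# A minimal usage sketch for `_generate_detections_v2` (illustrative only; the
# toy shapes are assumptions). A size-1 class dimension in `boxes` means
# class-agnostic box regression, as the docstring allows.
def _example_generate_detections_v2():
  boxes = tf.random.uniform([2, 1000, 1, 4])  # [batch, N, 1, 4]
  scores = tf.random.uniform([2, 1000, 5])    # [batch, N, num_classes]
  nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
      _generate_detections_v2(
          boxes, scores, pre_nms_top_k=200, max_num_detections=10))
  # nmsed_boxes: [2, 10, 4]; nmsed_scores/classes: [2, 10];
  # valid_detections: [2].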
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int):
"""Generates detected boxes with scores and classes for one-stage detector.
The function takes output of multi-level ConvNets and anchor boxes and
generates detected boxes. Note that this used batched nms, which is not
supported on TPU currently.
Args:
boxes: A `tf.Tensor` with shape `[batch_size, N, num_classes, 4]` or
      `[batch_size, N, 1, 4]`, which stacks box predictions on all feature
      levels. The N is the number of total anchors on all levels.
scores: A `tf.Tensor` with shape `[batch_size, N, num_classes]`, which
stacks class probability on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
pre_nms_score_threshold: A `float` representing the threshold for deciding
when to remove boxes based on score.
nms_iou_threshold: A `float` representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
max_num_detections: A `scalar` representing maximum number of boxes retained
over all classes.
Returns:
nms_boxes: A `float` tf.Tensor of shape [batch_size, max_num_detections, 4]
representing top detected boxes in [y1, x1, y2, x2].
nms_scores: A `float` tf.Tensor of shape [batch_size, max_num_detections]
representing sorted confidence scores for detected boxes. The values are
between [0, 1].
nms_classes: An `int` tf.Tensor of shape [batch_size, max_num_detections]
representing classes for detected boxes.
    valid_detections: An `int` tf.Tensor of shape [batch_size]; only the top
      `valid_detections` boxes are valid detections.
"""
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
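# A minimal usage sketch for `_generate_detections_batched` (illustrative
# only; the toy shapes are assumptions). This path delegates the whole
# batched, multi-class NMS to a single `tf.image.combined_non_max_suppression`
# call, which is why it is not TPU compatible.
def _example_generate_detections_batched():
  boxes = tf.random.uniform([2, 1000, 1, 4])
  scores = tf.random.uniform([2, 1000, 5])
  nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
      _generate_detections_batched(
          boxes,
          scores,
          pre_nms_score_threshold=0.05,
          nms_iou_threshold=0.5,
          max_num_detections=10))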
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
"""Generates the final detected boxes with scores and classes."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression.
If False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are
thrown away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string, one of `batched`, `v1` or `v2`, that specifies the
        NMS version.
      use_cpu_nms: A `bool` of whether or not to force NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True):
"""Generates final detections.
Args:
raw_boxes: A `tf.Tensor` of shape of `[batch_size, K, num_classes * 4]`
representing the class-specific box coordinates relative to anchors.
raw_scores: A `tf.Tensor` of shape of `[batch_size, K, num_classes]`
        representing the class logits before applying the score activation.
anchor_boxes: A `tf.Tensor` of shape of `[batch_size, K, 4]` representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of `[batch_size, 2]` storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
regression_weights: A list of four float numbers to scale coordinates.
bbox_per_class: A `bool`. If True, perform per-class box regression.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` `tf.Tensor` of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
"""
box_scores = tf.nn.softmax(raw_scores, axis=-1)
# Removes the background class.
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(raw_boxes,
[batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Box decoding.
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights)
# Box clipping
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
# Optionally force the NMS be run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes, box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
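# A minimal end-to-end sketch for `DetectionGenerator` (illustrative only; the
# toy shapes, anchors and image size are assumptions). `raw_scores` includes
# the background class at index 0, which the layer strips internally.
def _example_detection_generator():
  generator = DetectionGenerator(nms_version='batched', max_num_detections=10)
  num_classes = 5  # Including the background class.
  raw_boxes = tf.random.uniform([2, 100, num_classes * 4])
  raw_scores = tf.random.uniform([2, 100, num_classes])
  anchor_boxes = tf.random.uniform([2, 100, 4], maxval=64.0)
  image_shape = tf.constant([[64.0, 64.0], [64.0, 64.0]])
  detections = generator(raw_boxes, raw_scores, anchor_boxes, image_shape)
  # detections['detection_boxes']: [2, 10, 4]; detections['num_detections']:
  # [2]; class ids are offset by +1 because background occupies index 0.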
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
"""Generates detected boxes with scores and classes for one-stage detector."""
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
"""Initializes a multi-level detection generator.
Args:
      apply_nms: A `bool` of whether or not to apply non-maximum suppression. If
False, the decoded boxes and their scores are returned.
pre_nms_top_k: An `int` of the number of top scores proposals to be kept
before applying NMS.
pre_nms_score_threshold: A `float` of the score threshold to apply before
applying NMS. Proposals whose scores are below this threshold are thrown
away.
nms_iou_threshold: A `float` in [0, 1], the NMS IoU threshold.
max_num_detections: An `int` of the final number of total detections to
generate.
      nms_version: A string, one of `batched`, `v1` or `v2`, that specifies the
        NMS version.
      use_cpu_nms: A `bool` of whether or not to force NMS to run on CPU.
soft_nms_sigma: A `float` representing the sigma parameter for Soft NMS.
When soft_nms_sigma=0.0, we fall back to standard NMS.
**kwargs: Additional keyword arguments passed to Layer.
"""
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(MultilevelDetectionGenerator, self).__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Collects dict of multilevel boxes, scores, attributes into lists."""
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i,
num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = raw_scores_i.get_shape().as_list(
)[-1] // num_anchors_per_locations
# Applies score transformation and remove the implicit background class.
scores_i = tf.sigmoid(
tf.reshape(raw_scores_i, [
batch_size, num_locations * num_anchors_per_locations, num_classes
]))
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
# Box decoding.
# The anchor boxes are shared for all data in a batch.
# One stage detector only supports class agnostic box regression.
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4])
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
# Box clipping.
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = raw_att[str(
i)].get_shape().as_list()[-1] // num_anchors_per_locations
att_i = tf.reshape(raw_att[str(i)], [
batch_size, num_locations * num_anchors_per_locations,
attribute_size
])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
def __call__(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
"""Generates final detections.
Args:
raw_boxes: A `dict` with keys representing FPN levels and values
        representing box tensors of shape `[batch, feature_h, feature_w,
num_anchors * 4]`.
raw_scores: A `dict` with keys representing FPN levels and values
representing logit tensors of shape `[batch, feature_h, feature_w,
        num_anchors * num_classes]`.
anchor_boxes: A `tf.Tensor` of shape of [batch_size, K, 4] representing
the corresponding anchor boxes w.r.t `box_outputs`.
image_shape: A `tf.Tensor` of shape of [batch_size, 2] storing the image
height and width w.r.t. the scaled image, i.e. the same image space as
`box_outputs` and `anchor_boxes`.
raw_attributes: If not None, a `dict` of (attribute_name,
attribute_prediction) pairs. `attribute_prediction` is a dict that
        contains keys representing FPN levels and values representing tensors of
shape `[batch, feature_h, feature_w, num_anchors * attribute_size]`.
Returns:
If `apply_nms` = True, the return is a dictionary with keys:
`detection_boxes`: A `float` tf.Tensor of shape
[batch, max_num_detections, 4] representing top detected boxes in
[y1, x1, y2, x2].
`detection_scores`: A `float` tf.Tensor of shape
[batch, max_num_detections] representing sorted confidence scores for
detected boxes. The values are between [0, 1].
`detection_classes`: An `int` tf.Tensor of shape
[batch, max_num_detections] representing classes for detected boxes.
        `num_detections`: An `int` tf.Tensor of shape [batch]; only the first
          `num_detections` boxes are valid detections.
`detection_attributes`: A dict. Values of the dict is a `float`
tf.Tensor of shape [batch, max_num_detections, attribute_size]
representing attribute predictions for detected boxes.
If `apply_nms` = False, the return is a dictionary with keys:
`decoded_boxes`: A `float` tf.Tensor of shape [batch, num_raw_boxes, 4]
representing all the decoded boxes.
`decoded_box_scores`: A `float` tf.Tensor of shape
          [batch, num_raw_boxes] representing scores of all the decoded boxes.
`decoded_box_attributes`: A dict. Values in the dict is a
`float` tf.Tensor of shape [batch, num_raw_boxes, attribute_size]
representing attribute predictions of all the decoded boxes.
"""
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
# Optionally force the NMS to run on CPU.
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version']))
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes, scores, self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
        # Sets `nmsed_attributes` to an empty dict for batched NMS.
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
nmsed_attributes) = (
_generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
        # Sets `nmsed_attributes` to an empty dict for NMS v2.
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
# Adds 1 to offset the background class which has index 0.
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
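# A minimal end-to-end sketch for `MultilevelDetectionGenerator` (illustrative
# only; the two-level FPN shapes and anchor counts are assumptions). Box and
# score inputs are dicts keyed by FPN level; scores are logits that include
# the background class.
def _example_multilevel_detection_generator():
  generator = MultilevelDetectionGenerator(
      nms_version='v2', max_num_detections=10)
  num_classes, anchors_per_loc = 5, 3  # num_classes includes background.
  raw_boxes = {
      '3': tf.random.uniform([2, 8, 8, anchors_per_loc * 4]),
      '4': tf.random.uniform([2, 4, 4, anchors_per_loc * 4]),
  }
  raw_scores = {
      '3': tf.random.uniform([2, 8, 8, anchors_per_loc * num_classes]),
      '4': tf.random.uniform([2, 4, 4, anchors_per_loc * num_classes]),
  }
  anchor_boxes = {
      '3': tf.random.uniform([2, 8 * 8 * anchors_per_loc, 4], maxval=64.0),
      '4': tf.random.uniform([2, 4 * 4 * anchors_per_loc, 4], maxval=64.0),
  }
  image_shape = tf.constant([[64.0, 64.0], [64.0, 64.0]])
  detections = generator(raw_boxes, raw_scores, anchor_boxes, image_shape)
  # detections['detection_boxes']: [2, 10, 4]; 'detection_attributes' is an
  # empty dict because no `raw_attributes` were passed.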
| 45.275498
| 85
| 0.674029
|
import contextlib
from typing import List, Optional, Mapping
import tensorflow as tf
from official.vision.beta.ops import box_ops
from official.vision.beta.ops import nms
from official.vision.beta.ops import preprocess_ops
def _generate_detections_v1(boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str,
tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
with tf.name_scope('generate_detections'):
batch_size = scores.get_shape().as_list()[0]
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(batch_size):
(nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, valid_detections_i,
nmsed_att_i) = _generate_detections_per_image(
boxes[i],
scores[i],
attributes={
att_name: att[i] for att_name, att in attributes.items()
} if attributes else {},
pre_nms_top_k=pre_nms_top_k,
pre_nms_score_threshold=pre_nms_score_threshold,
nms_iou_threshold=nms_iou_threshold,
max_num_detections=max_num_detections,
soft_nms_sigma=soft_nms_sigma)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
valid_detections.append(valid_detections_i)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name].append(nmsed_att_i[att_name])
nmsed_boxes = tf.stack(nmsed_boxes, axis=0)
nmsed_scores = tf.stack(nmsed_scores, axis=0)
nmsed_classes = tf.stack(nmsed_classes, axis=0)
valid_detections = tf.stack(valid_detections, axis=0)
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.stack(nmsed_attributes[att_name], axis=0)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _generate_detections_per_image(
boxes: tf.Tensor,
scores: tf.Tensor,
attributes: Optional[Mapping[str, tf.Tensor]] = None,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
soft_nms_sigma: Optional[float] = None):
nmsed_boxes = []
nmsed_scores = []
nmsed_classes = []
num_classes_for_box = boxes.get_shape().as_list()[1]
num_classes = scores.get_shape().as_list()[1]
if attributes:
nmsed_attributes = {att_name: [] for att_name in attributes.keys()}
else:
nmsed_attributes = {}
for i in range(num_classes):
boxes_i = boxes[:, min(num_classes_for_box - 1, i)]
scores_i = scores[:, i]
scores_i, indices = tf.nn.top_k(
scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_top_k))
boxes_i = tf.gather(boxes_i, indices)
if soft_nms_sigma is not None:
(nmsed_indices_i,
nmsed_scores_i) = tf.image.non_max_suppression_with_scores(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
soft_nms_sigma=soft_nms_sigma,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_boxes_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_boxes_i, max_num_detections, 0.0)
nmsed_scores_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_scores_i, max_num_detections, -1.0)
else:
(nmsed_indices_i,
nmsed_num_valid_i) = tf.image.non_max_suppression_padded(
tf.cast(boxes_i, tf.float32),
tf.cast(scores_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_to_max_output_size=True,
name='nms_detections_' + str(i))
nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i)
nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i)
nmsed_scores_i = tf.where(
tf.less(tf.range(max_num_detections), [nmsed_num_valid_i]),
nmsed_scores_i, -tf.ones_like(nmsed_scores_i))
nmsed_classes_i = tf.fill([max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
if attributes:
for att_name, att in attributes.items():
num_classes_for_attr = att.get_shape().as_list()[1]
att_i = att[:, min(num_classes_for_attr - 1, i)]
att_i = tf.gather(att_i, indices)
nmsed_att_i = tf.gather(att_i, nmsed_indices_i)
nmsed_att_i = preprocess_ops.clip_or_pad_to_fixed_size(
nmsed_att_i, max_num_detections, 0.0)
nmsed_attributes[att_name].append(nmsed_att_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=0)
nmsed_scores = tf.concat(nmsed_scores, axis=0)
nmsed_classes = tf.concat(nmsed_classes, axis=0)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices)
nmsed_classes = tf.gather(nmsed_classes, indices)
valid_detections = tf.reduce_sum(
tf.cast(tf.greater(nmsed_scores, -1), tf.int32))
if attributes:
for att_name in attributes.keys():
nmsed_attributes[att_name] = tf.concat(nmsed_attributes[att_name], axis=0)
nmsed_attributes[att_name] = tf.gather(nmsed_attributes[att_name],
indices)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, nmsed_attributes
def _select_top_k_scores(scores_in: tf.Tensor, pre_nms_num_detections: int):
batch_size, num_anchors, num_class = scores_in.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(scores_in)[0]
scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])
scores_trans = tf.reshape(scores_trans, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores_trans, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[batch_size, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[batch_size, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores,
[0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])
def _generate_detections_v2(boxes: tf.Tensor,
scores: tf.Tensor,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100):
with tf.name_scope('generate_detections'):
nmsed_boxes = []
nmsed_classes = []
nmsed_scores = []
valid_detections = []
batch_size, _, num_classes_for_box, _ = boxes.get_shape().as_list()
if batch_size is None:
batch_size = tf.shape(boxes)[0]
_, total_anchors, num_classes = scores.get_shape().as_list()
scores, indices = _select_top_k_scores(
scores, min(total_anchors, pre_nms_top_k))
for i in range(num_classes):
boxes_i = boxes[:, :, min(num_classes_for_box - 1, i), :]
scores_i = scores[:, :, i]
boxes_i = tf.gather(boxes_i, indices[:, :, i], batch_dims=1, axis=1)
boxes_i, scores_i = box_ops.filter_boxes_by_scores(
boxes_i, scores_i, min_score_threshold=pre_nms_score_threshold)
(nmsed_scores_i, nmsed_boxes_i) = nms.sorted_non_max_suppression_padded(
tf.cast(scores_i, tf.float32),
tf.cast(boxes_i, tf.float32),
max_num_detections,
iou_threshold=nms_iou_threshold)
nmsed_classes_i = tf.fill([batch_size, max_num_detections], i)
nmsed_boxes.append(nmsed_boxes_i)
nmsed_scores.append(nmsed_scores_i)
nmsed_classes.append(nmsed_classes_i)
nmsed_boxes = tf.concat(nmsed_boxes, axis=1)
nmsed_scores = tf.concat(nmsed_scores, axis=1)
nmsed_classes = tf.concat(nmsed_classes, axis=1)
nmsed_scores, indices = tf.nn.top_k(
nmsed_scores, k=max_num_detections, sorted=True)
nmsed_boxes = tf.gather(nmsed_boxes, indices, batch_dims=1, axis=1)
nmsed_classes = tf.gather(nmsed_classes, indices, batch_dims=1)
valid_detections = tf.reduce_sum(
input_tensor=tf.cast(tf.greater(nmsed_scores, -1), tf.int32), axis=1)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
def _generate_detections_batched(boxes: tf.Tensor, scores: tf.Tensor,
pre_nms_score_threshold: float,
nms_iou_threshold: float,
max_num_detections: int):
with tf.name_scope('generate_detections'):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = (
tf.image.combined_non_max_suppression(
boxes,
scores,
max_output_size_per_class=max_num_detections,
max_total_size=max_num_detections,
iou_threshold=nms_iou_threshold,
score_threshold=pre_nms_score_threshold,
pad_per_class=False,
clip_boxes=False))
nmsed_classes = tf.cast(nmsed_classes, tf.int32)
return nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections
@tf.keras.utils.register_keras_serializable(package='Vision')
class DetectionGenerator(tf.keras.layers.Layer):
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v2',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(DetectionGenerator, self).__init__(**kwargs)
def __call__(self,
raw_boxes: tf.Tensor,
raw_scores: tf.Tensor,
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
regression_weights: Optional[List[float]] = None,
bbox_per_class: bool = True):
box_scores = tf.nn.softmax(raw_scores, axis=-1)
box_scores_shape = tf.shape(box_scores)
box_scores_shape_list = box_scores.get_shape().as_list()
batch_size = box_scores_shape[0]
num_locations = box_scores_shape_list[1]
num_classes = box_scores_shape_list[-1]
box_scores = tf.slice(box_scores, [0, 0, 1], [-1, -1, -1])
if bbox_per_class:
num_detections = num_locations * (num_classes - 1)
raw_boxes = tf.reshape(raw_boxes,
[batch_size, num_locations, num_classes, 4])
raw_boxes = tf.slice(raw_boxes, [0, 0, 1, 0], [-1, -1, -1, -1])
anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1])
raw_boxes = tf.reshape(raw_boxes, [batch_size, num_detections, 4])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
decoded_boxes = box_ops.decode_boxes(
raw_boxes, anchor_boxes, weights=regression_weights)
decoded_boxes = box_ops.clip_boxes(
decoded_boxes, tf.expand_dims(image_shape, axis=1))
if bbox_per_class:
decoded_boxes = tf.reshape(
decoded_boxes, [batch_size, num_locations, num_classes - 1, 4])
else:
decoded_boxes = tf.expand_dims(decoded_boxes, axis=2)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': decoded_boxes,
'decoded_box_scores': box_scores,
}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
decoded_boxes, box_scores,
self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections, _) = (
_generate_detections_v1(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
decoded_boxes,
box_scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
@tf.keras.utils.register_keras_serializable(package='Vision')
class MultilevelDetectionGenerator(tf.keras.layers.Layer):
def __init__(self,
apply_nms: bool = True,
pre_nms_top_k: int = 5000,
pre_nms_score_threshold: float = 0.05,
nms_iou_threshold: float = 0.5,
max_num_detections: int = 100,
nms_version: str = 'v1',
use_cpu_nms: bool = False,
soft_nms_sigma: Optional[float] = None,
**kwargs):
self._config_dict = {
'apply_nms': apply_nms,
'pre_nms_top_k': pre_nms_top_k,
'pre_nms_score_threshold': pre_nms_score_threshold,
'nms_iou_threshold': nms_iou_threshold,
'max_num_detections': max_num_detections,
'nms_version': nms_version,
'use_cpu_nms': use_cpu_nms,
'soft_nms_sigma': soft_nms_sigma,
}
super(MultilevelDetectionGenerator, self).__init__(**kwargs)
def _decode_multilevel_outputs(
self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
boxes = []
scores = []
if raw_attributes:
attributes = {att_name: [] for att_name in raw_attributes.keys()}
else:
attributes = {}
levels = list(raw_boxes.keys())
min_level = int(min(levels))
max_level = int(max(levels))
for i in range(min_level, max_level + 1):
raw_boxes_i = raw_boxes[str(i)]
raw_scores_i = raw_scores[str(i)]
batch_size = tf.shape(raw_boxes_i)[0]
(_, feature_h_i, feature_w_i,
num_anchors_per_locations_times_4) = raw_boxes_i.get_shape().as_list()
num_locations = feature_h_i * feature_w_i
num_anchors_per_locations = num_anchors_per_locations_times_4 // 4
num_classes = raw_scores_i.get_shape().as_list(
)[-1] // num_anchors_per_locations
scores_i = tf.sigmoid(
tf.reshape(raw_scores_i, [
batch_size, num_locations * num_anchors_per_locations, num_classes
]))
scores_i = tf.slice(scores_i, [0, 0, 1], [-1, -1, -1])
anchor_boxes_i = tf.reshape(
anchor_boxes[str(i)],
[batch_size, num_locations * num_anchors_per_locations, 4])
raw_boxes_i = tf.reshape(
raw_boxes_i,
[batch_size, num_locations * num_anchors_per_locations, 4])
boxes_i = box_ops.decode_boxes(raw_boxes_i, anchor_boxes_i)
boxes_i = box_ops.clip_boxes(
boxes_i, tf.expand_dims(image_shape, axis=1))
boxes.append(boxes_i)
scores.append(scores_i)
if raw_attributes:
for att_name, raw_att in raw_attributes.items():
attribute_size = raw_att[str(
i)].get_shape().as_list()[-1] // num_anchors_per_locations
att_i = tf.reshape(raw_att[str(i)], [
batch_size, num_locations * num_anchors_per_locations,
attribute_size
])
attributes[att_name].append(att_i)
boxes = tf.concat(boxes, axis=1)
boxes = tf.expand_dims(boxes, axis=2)
scores = tf.concat(scores, axis=1)
if raw_attributes:
for att_name in raw_attributes.keys():
attributes[att_name] = tf.concat(attributes[att_name], axis=1)
attributes[att_name] = tf.expand_dims(attributes[att_name], axis=2)
return boxes, scores, attributes
def __call__(self,
raw_boxes: Mapping[str, tf.Tensor],
raw_scores: Mapping[str, tf.Tensor],
anchor_boxes: tf.Tensor,
image_shape: tf.Tensor,
raw_attributes: Optional[Mapping[str, tf.Tensor]] = None):
boxes, scores, attributes = self._decode_multilevel_outputs(
raw_boxes, raw_scores, anchor_boxes, image_shape, raw_attributes)
if not self._config_dict['apply_nms']:
return {
'decoded_boxes': boxes,
'decoded_box_scores': scores,
'decoded_box_attributes': attributes,
}
if self._config_dict['use_cpu_nms']:
nms_context = tf.device('cpu:0')
else:
nms_context = contextlib.nullcontext()
with nms_context:
if raw_attributes and (self._config_dict['nms_version'] != 'v1'):
raise ValueError(
'Attribute learning is only supported for NMSv1 but NMS {} is used.'
.format(self._config_dict['nms_version']))
if self._config_dict['nms_version'] == 'batched':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_batched(
boxes, scores, self._config_dict['pre_nms_score_threshold'],
self._config_dict['nms_iou_threshold'],
self._config_dict['max_num_detections']))
nmsed_attributes = {}
elif self._config_dict['nms_version'] == 'v1':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections,
nmsed_attributes) = (
_generate_detections_v1(
boxes,
scores,
attributes=attributes if raw_attributes else None,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections'],
soft_nms_sigma=self._config_dict['soft_nms_sigma']))
elif self._config_dict['nms_version'] == 'v2':
(nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections) = (
_generate_detections_v2(
boxes,
scores,
pre_nms_top_k=self._config_dict['pre_nms_top_k'],
pre_nms_score_threshold=self
._config_dict['pre_nms_score_threshold'],
nms_iou_threshold=self._config_dict['nms_iou_threshold'],
max_num_detections=self._config_dict['max_num_detections']))
nmsed_attributes = {}
else:
raise ValueError('NMS version {} not supported.'.format(
self._config_dict['nms_version']))
nmsed_classes += 1
return {
'num_detections': valid_detections,
'detection_boxes': nmsed_boxes,
'detection_classes': nmsed_classes,
'detection_scores': nmsed_scores,
'detection_attributes': nmsed_attributes,
}
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config):
return cls(**config)
| true
| true
|
79072d0993a2a9e2468a7e746218cce0e5832973
| 3,334
|
py
|
Python
|
examples/django-test-app/project/settings.py
|
zmiklank/s2i-python-container
|
efa47c5af11a98df18ce7c905332149770f938c3
|
[
"Apache-2.0"
] | null | null | null |
examples/django-test-app/project/settings.py
|
zmiklank/s2i-python-container
|
efa47c5af11a98df18ce7c905332149770f938c3
|
[
"Apache-2.0"
] | null | null | null |
examples/django-test-app/project/settings.py
|
zmiklank/s2i-python-container
|
efa47c5af11a98df18ce7c905332149770f938c3
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y*b^6p#z&cm2)8rzgbp2i4k*+rg2h%60l*bmf6hg&ro!z0-ael'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: do not use '*' in production unless behind an HTTP(S) proxy
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Django 1
if django.VERSION[0] == 1:
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
else:
# Django 2+
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
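# A minimal production-hardening sketch (kept as comments so this example app
# keeps its behavior; the environment variable names are assumptions):
#
#   DEBUG = os.environ.get('DJANGO_DEBUG', '0') == '1'
#   ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', 'localhost').split(',')
#   SECRET_KEY = os.environ['DJANGO_SECRET_KEY']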
| 28.016807
| 74
| 0.695861
|
import django
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'y*b^6p#z&cm2)8rzgbp2i4k*+rg2h%60l*bmf6hg&ro!z0-ael'
DEBUG = True
# SECURITY WARNING: do not use '*' in production unless behind an HTTP(S) proxy
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Django 1
if django.VERSION[0] == 1:
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
else:
# Django 2+
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
| true
| true
|
79072d403b5317b350eabb968cb7bc42d90b1b98
| 3,419
|
py
|
Python
|
Sketches/RJL/bittorrent/BitTorrent/BitTorrent/BeautifulSupe.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/RJL/bittorrent/BitTorrent/BitTorrent/BeautifulSupe.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 17
|
2015-01-05T21:06:22.000Z
|
2015-12-07T20:45:44.000Z
|
Sketches/RJL/bittorrent/BitTorrent/BitTorrent/BeautifulSupe.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 7
|
2015-07-28T09:17:17.000Z
|
2021-11-07T02:29:41.000Z
|
# A very, very minimal BeautifulSoup imitation.
#
# BS uses sgmllib to parse, which converts everything to lower case.
# This uses real XML parsing to mimic the parts of BS we use.
import xml.dom.minidom
def _getText(node):
nodelist = node.childNodes
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(str(node.data))
return rc
def _getNodesAsTags(root):
nodelist = root.childNodes
tags = []
for node in nodelist:
if node.nodeType == node.ELEMENT_NODE:
tags.append(Tag(node))
return tags
class Tag(object):
def __init__(self, node):
self.node = node
self.name = node.nodeName
self.contents = _getNodesAsTags(self.node)
text = _getText(self.node)
self.contents += text
self.text = ''.join(text)
def child_elements(self):
children = []
for tag in self.contents:
if isinstance(tag, Tag):
children.append(tag)
return children
def get(self, tagname):
got = self.first(tagname)
if got:
return got.text
def first(self, tagname):
found = None
for tag in self.contents:
if isinstance(tag, Tag):
if tag.name == tagname:
found = tag
break
return found
class BeautifulSupe(object):
def __init__(self, data):
#please don't give us your null terminators
data = data.strip(chr(0))
self.dom = xml.dom.minidom.parseString(data)
def first(self, tagname, root = None):
found = None
        if root is None:
e = self.dom.getElementsByTagName(tagname)
if len(e) > 0:
found = e[0]
else:
for node in root.childNodes:
if node.nodeName == tagname:
found = node
break
if not found:
return None
tag = Tag(found)
return tag
def fetch(self, tagname, restraints = {}):
e = self.dom.getElementsByTagName(tagname)
matches = []
for node in e:
match = 1
for restraint in restraints:
f = self.first(restraint, node)
if not f:
match = 0
break
text = restraints[restraint]
if not f.contents[0].startswith(text):
match = 0
break
if match:
tag = Tag(node)
matches.append(tag)
return matches
def scour(self, prefix, suffix = None, node = None):
if node is None:
root = self.dom.getElementsByTagName(self.dom.documentElement.tagName)[0]
node = root
matches = []
for node in node.childNodes:
match = 0
name = node.nodeName
if name.startswith(prefix):
if suffix:
if name.endswith(suffix):
match = 1
else:
match = 1
if match:
tag = Tag(node)
matches.append(tag)
matches += self.scour(prefix, suffix, node)
return matches
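# A minimal usage sketch (illustrative only; the XML snippet and expected
# outputs are assumptions based on the methods above):
def _example_beautiful_supe():
    data = ('<root><item><name>alpha</name></item>'
            '<item><name>beta</name></item></root>')
    soup = BeautifulSupe(data)
    print(soup.first('item').get('name'))            # -> alpha
    # fetch() keeps elements whose named child text starts with the prefix:
    print(len(soup.fetch('item', {'name': 'b'})))    # -> 1
    # scour() collects descendants whose tag name matches a prefix/suffix:
    print([tag.name for tag in soup.scour('it')])    # -> ['item', 'item']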
| 25.706767
| 85
| 0.497221
|
import xml.dom.minidom
def _getText(node):
nodelist = node.childNodes
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(str(node.data))
return rc
def _getNodesAsTags(root):
nodelist = root.childNodes
tags = []
for node in nodelist:
if node.nodeType == node.ELEMENT_NODE:
tags.append(Tag(node))
return tags
class Tag(object):
def __init__(self, node):
self.node = node
self.name = node.nodeName
self.contents = _getNodesAsTags(self.node)
text = _getText(self.node)
self.contents += text
self.text = ''.join(text)
def child_elements(self):
children = []
for tag in self.contents:
if isinstance(tag, Tag):
children.append(tag)
return children
def get(self, tagname):
got = self.first(tagname)
if got:
return got.text
def first(self, tagname):
found = None
for tag in self.contents:
if isinstance(tag, Tag):
if tag.name == tagname:
found = tag
break
return found
class BeautifulSupe(object):
def __init__(self, data):
data = data.strip(chr(0))
self.dom = xml.dom.minidom.parseString(data)
def first(self, tagname, root = None):
found = None
        if root is None:
e = self.dom.getElementsByTagName(tagname)
if len(e) > 0:
found = e[0]
else:
for node in root.childNodes:
if node.nodeName == tagname:
found = node
break
if not found:
return None
tag = Tag(found)
return tag
def fetch(self, tagname, restraints = {}):
e = self.dom.getElementsByTagName(tagname)
matches = []
for node in e:
match = 1
for restraint in restraints:
f = self.first(restraint, node)
if not f:
match = 0
break
text = restraints[restraint]
if not f.contents[0].startswith(text):
match = 0
break
if match:
tag = Tag(node)
matches.append(tag)
return matches
def scour(self, prefix, suffix = None, node = None):
if node is None:
root = self.dom.getElementsByTagName(self.dom.documentElement.tagName)[0]
node = root
matches = []
for node in node.childNodes:
match = 0
name = node.nodeName
if name.startswith(prefix):
if suffix:
if name.endswith(suffix):
match = 1
else:
match = 1
if match:
tag = Tag(node)
matches.append(tag)
matches += self.scour(prefix, suffix, node)
return matches
| true
| true
|
79072d68a7e2cd63b1a041a77c3242738c18fde5
| 2,641
|
py
|
Python
|
code/doiainn/doiainn/settings.py
|
bbenko/doiainn
|
feba5f963ee8018b9cf79b42f97a7f31af2e5583
|
[
"MIT"
] | null | null | null |
code/doiainn/doiainn/settings.py
|
bbenko/doiainn
|
feba5f963ee8018b9cf79b42f97a7f31af2e5583
|
[
"MIT"
] | null | null | null |
code/doiainn/doiainn/settings.py
|
bbenko/doiainn
|
feba5f963ee8018b9cf79b42f97a7f31af2e5583
|
[
"MIT"
] | null | null | null |
"""
Django settings for doiainn project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fbrywz7o3a1=vf-+4luwn5h)!kt-xzghqtm#^3(epwcwcp^jws'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'doiainn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'doiainn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| 25.640777
| 71
| 0.702385
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'fbrywz7o3a1=vf-+4luwn5h)!kt-xzghqtm#^3(epwcwcp^jws'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'doiainn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'doiainn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| true
| true
|
79072e5fc4433edc8f0a0a35a9c43e91bb6b764d
| 7,248
|
py
|
Python
|
macro_lib/growth/solow.py
|
zhaoy17/Macro_lib
|
44c1fd16ae139bbfe6616d1bdca55420fd1695f7
|
[
"Apache-2.0"
] | 2
|
2020-03-24T07:02:20.000Z
|
2020-03-24T07:02:27.000Z
|
macro_lib/growth/solow.py
|
zhaoy17/Macro_lib
|
44c1fd16ae139bbfe6616d1bdca55420fd1695f7
|
[
"Apache-2.0"
] | null | null | null |
macro_lib/growth/solow.py
|
zhaoy17/Macro_lib
|
44c1fd16ae139bbfe6616d1bdca55420fd1695f7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
'''
Simulating Solow-Swan model, which attempts to model the long-run economic growth
by looking at capital accumulation (K), population growth (L) and technological
progress, which results in increase in productivity. It models the total production
of the economy using the constant-returns-to-scale Cobb-Douglas production function
Y(t) = K(t)^{alpha} * (A(t)L(t))^{1-alpha}, where
Y(t): a single good output at time t
K(t): the amount of capital at time t
L(t): population at time t
A(t): total factor productivity at time t
alpha: output elasticity of capital
with a law of motion:
I(t) = sY(t)
C(t) = (1-s)Y(t)
K(t+1) = (1-delta)K(t) + I(t)
L(t+1) = (1+n)L(t)
we can derive the law of motion for k(t), capital per capita:
    k(t+1) = K(t+1)/L(t+1)
           = ((1-delta)K(t) + I(t)) / ((1+n)L(t))
           = (1-delta)/(1+n) * k(t) + s/(1+n) * A * k(t)^alpha
as well as per capita output:
    y(t) = Y(t)/L(t)
         = A * k(t)^alpha
where, I(t): total investment at time t
C(t): total consumption at time t
K(t): total capital at time t
L(t): total population at time t
s: the saving rate
delta: rate of capital depreciation
n: rate of population growth
This simulation allows the user to take control of those parameters and plot the
simulated total output growth. The program also enables the user to query data
from the Federal Reserve Economic Data (FRED).
'''
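# Steady state implied by the law of motion above: setting k(t+1) = k(t) = k*
# gives k* = (s*A / (n + delta)) ** (1 / (1 - alpha)), a useful sanity check
# for long simulation runs.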
class solow:
'''
A: total factor productivity
k0: the initial amount of capital
    delta: rate of depreciation of capital
    s: the saving rate
    n: the population growth rate
    alpha: output elasticity of capital
    t0, tmax: the starting and ending years of the simulation
'''
def __init__(self, A=2.87, k0=3.5, delta = 0.08, s = 0.1, n = 0.015, alpha = 0.36, t0 = 1956, tmax = 2060):
self._A = A
self._k0 = k0
self._k = k0
self._delta = delta
self._s = s
self._n = n
self._alpha = alpha
self._t0 = t0
self._tmax = tmax
self._t = range(t0, tmax + 1)
self._y = np.zeros(len(self._t))
self._y[0] = self._A * (self._k0 ** self._alpha)
self._time_passed = 0
'''
    this method returns all the variables in this model, which include A, k0,
    delta, s, n, alpha, t0, tmax, y, and t as a dictionary
'''
def get_variables(self):
return {
'A' : self._A,
'k0': self._k0,
'delta': self._delta,
's' : self._s,
'n' : self._n,
'alpha': self._alpha,
't0' : self._t0,
'tmax': self._tmax,
'y' : self._y,
't' : self._t }
'''
    this method takes a list or dictionary as input and sets the variables based
    on the user's input. If the user inputs a list, its entries are treated as
    the new values of A, k0, delta, s, n, alpha, t0, and tmax. If the user
    inputs a dictionary, the fields are set according to the keys.
Example:
set_variables({A: 2.87, k0: 3.5, delta:0.08, s:0.1, n:0.015, alpha:0.36, t0:1956, tmax:2060})
set_variables(2.87,3.5,0.08,0.1,0.015,0.36,1956,2060)
both achieve the same output
'''
def set_variables(self, vars):
        if (type(vars) != type([]) and type(vars) != type({})):
raise ValueError('arguments must be either a dictionary or a list')
if (type(vars) == type([])):
if (len(vars) != 8):
raise ValueError('You must enter the following arguments: A, k0, delta, s, n, alpha, t0, tmax')
else:
self.setA(vars[0])
self.setK0(vars[1])
self.setDelta(vars[2])
self.setS(vars[3])
self.setN(vars[4])
self.setAlpha(vars[5])
self.setTRange(vars[6], vars[7])
if (type(vars) == type({})):
try:
self.setA(vars['A'])
self.setK0(vars['k0'])
self.setDelta(vars['delta'])
self.setS(vars['s'])
self.setN(vars['n'])
self.setAlpha(vars['alpha'])
self.setTRange(vars['t0'], vars['tmax'])
except KeyError:
raise ValueError("Your dictionary must have the keys A, k0, delta, s, n, alpha, t0, and tmax")
'''
setter for the field A (total factor productivity)
'''
def setA(self, A):
if (A < 0):
raise ValueError("A must be positive")
self._A = A
'''
setter for the field k0 (the initial amount of capital)
'''
    def setK0(self, k0):
        if (k0 < 0):
            raise ValueError("k0 must be positive")
        self._k0 = k0
'''
    setter for delta (rate of depreciation of capital)
'''
def setDelta(self, delta):
if (delta > 1 or delta < 0):
raise ValueError("depreciation rate must be in between 0 and 1")
self._delta = delta
'''
setter for S (saving rate)
'''
def setS(self, s):
if (s > 1 or s < 0):
raise ValueError("saving rate must be in between 0 and 1")
        self._s = s
'''
setter for N (population growth rate)
'''
def setN(self,n):
self._n = n
'''
setter for alpha (output elasticity of capital)
'''
def setAlpha(self, alpha):
if (alpha < 0 or alpha > 1):
raise ValueError("alpha must be in between 0 and 1")
self._alpha = alpha
'''
setter for the time range
Example:
setTRange(1956, 2060): set the time range starting from 1956 to 2060
'''
def setTRange(self, start, end):
if (end < start):
raise ValueError("tmax must be greater than t0")
self._t0 = start
self._tmax = end
self._t = range(start, end+1)
'''
    Start the simulation, and return the predicted per-capita output y
    from the start period to the end period
'''
def simulate(self):
        # y[0] is set in __init__, so advance the remaining len(t)-1 periods
        for t in self._t[1:]:
self._update()
return [self._y, self._t]
'''
    Plot the prediction using matplotlib. The x-axis would be the year, and the
    y-axis the predicted GDP per capita
TO BE IMPLEMENTED
'''
def plot(self):
pass
'''
store the output as a pandas dataframe
'''
def to_df(self):
return pd.DataFrame({'year' : self._t, 'gdp_per_capita' : self._y})
'''
export the output as a csv file to the user-provided location
TO BE IMPLEMENTED
'''
def to_csv(self, dir):
pass
'''
    launch the GUI, which enables more user-friendly interaction with the software
TO BE IMPLEMENTED
'''
def gui(self):
pass
'''
update all the fields according to the law of motion
'''
def _update(self):
#update k
        self._k = (1-self._delta)/(1+self._n) * self._k + (self._s)/(1+self._n) * self._A * (self._k ** self._alpha)
# update t
self._time_passed += 1
#update y
self._y[self._time_passed] = self._A * (self._k ** self._alpha)
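# Hedged usage sketch (not in the original file; assumes the fixes above to
# simulate() and _update()):
if __name__ == '__main__':
    model = solow(A=2.87, k0=3.5, s=0.1, n=0.015, alpha=0.36)
    y, t = model.simulate()
    print(model.to_df().head())  # per-capita output by year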
| 30.453782
| 111
| 0.560706
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class solow:
def __init__(self, A=2.87, k0=3.5, delta = 0.08, s = 0.1, n = 0.015, alpha = 0.36, t0 = 1956, tmax = 2060):
self._A = A
self._k0 = k0
self._k = k0
self._delta = delta
self._s = s
self._n = n
self._alpha = alpha
self._t0 = t0
self._tmax = tmax
self._t = range(t0, tmax + 1)
self._y = np.zeros(len(self._t))
self._y[0] = self._A * (self._k0 ** self._alpha)
self._time_passed = 0
def get_variables(self):
return {
'A' : self._A,
'k0': self._k0,
'delta': self._delta,
's' : self._s,
'n' : self._n,
'alpha': self._alpha,
't0' : self._t0,
'tmax': self._tmax,
'y' : self._y,
't' : self._t }
def set_variables(self, vars):
        if (type(vars) != type([]) and type(vars) != type({})):
raise ValueError('arguments must be either a dictionary or a list')
if (type(vars) == type([])):
if (len(vars) != 8):
raise ValueError('You must enter the following arguments: A, k0, delta, s, n, alpha, t0, tmax')
else:
self.setA(vars[0])
self.setK0(vars[1])
self.setDelta(vars[2])
self.setS(vars[3])
self.setN(vars[4])
self.setAlpha(vars[5])
self.setTRange(vars[6], vars[7])
if (type(vars) == type({})):
try:
self.setA(vars['A'])
self.setK0(vars['k0'])
self.setDelta(vars['delta'])
self.setS(vars['s'])
self.setN(vars['n'])
self.setAlpha(vars['alpha'])
self.setTRange(vars['t0'], vars['tmax'])
except KeyError:
raise ValueError("Your dictionary must have the keys A, k0, delta, s, n, alpha, t0, and tmax")
def setA(self, A):
if (A < 0):
raise ValueError("A must be positive")
self._A = A
    def setK0(self, k0):
        if (k0 < 0):
            raise ValueError("k0 must be positive")
        self._k0 = k0
def setDelta(self, delta):
if (delta > 1 or delta < 0):
raise ValueError("depreciation rate must be in between 0 and 1")
self._delta = delta
def setS(self, s):
if (s > 1 or s < 0):
raise ValueError("saving rate must be in between 0 and 1")
        self._s = s
def setN(self,n):
self._n = n
def setAlpha(self, alpha):
if (alpha < 0 or alpha > 1):
raise ValueError("alpha must be in between 0 and 1")
self._alpha = alpha
def setTRange(self, start, end):
if (end < start):
raise ValueError("tmax must be greater than t0")
self._t0 = start
self._tmax = end
self._t = range(start, end+1)
def simulate(self):
        for t in self._t[1:]:
self._update()
return [self._y, self._t]
def plot(self):
pass
def to_df(self):
return pd.DataFrame({'year' : self._t, 'gdp_per_capita' : self._y})
def to_csv(self, dir):
pass
def gui(self):
pass
def _update(self):
        self._k = (1-self._delta)/(1+self._n) * self._k + (self._s)/(1+self._n) * self._A * (self._k ** self._alpha)
self._time_passed += 1
self._y[self._time_passed] = self._A * (self._k ** self._alpha)
| true
| true
|
79072ecbaf7146f0b35ba3fb0dc12f5ddb30f1d3
| 506
|
py
|
Python
|
nexus/bot/handlers/__init__.py
|
RobbiNespu/hyperboria
|
7db858386f1a20e8d49bc16f53bfd7f1e4d03f7e
|
[
"Unlicense"
] | 54
|
2021-01-07T03:02:36.000Z
|
2022-03-28T17:19:29.000Z
|
nexus/bot/handlers/__init__.py
|
the-superpirate/hyperboria
|
74776166158d07b199677f9738862e5f1fa54367
|
[
"Unlicense"
] | 10
|
2021-01-08T17:38:59.000Z
|
2022-02-28T14:34:45.000Z
|
nexus/bot/handlers/__init__.py
|
the-superpirate/hyperboria
|
74776166158d07b199677f9738862e5f1fa54367
|
[
"Unlicense"
] | 16
|
2020-12-28T18:31:44.000Z
|
2022-02-22T15:00:53.000Z
|
from . import (
admin,
ban,
close,
contact,
copyright,
donate,
download,
emoji,
help,
legacy,
noop,
roll,
search,
settings,
shortlink,
start,
stop,
submit,
top_missed,
view,
vote,
)
__all__ = ['admin', 'ban', 'contact', 'copyright', 'close', 'donate', 'download', 'emoji', 'help',
'legacy', 'noop', 'roll', 'search', 'settings',
'shortlink', 'start', 'stop', 'submit', 'top_missed', 'view', 'vote']
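# Hedged illustration (not part of the original package): a flat registry like
# this lets a dispatcher collect every handler module via __all__; the import
# path below is assumed from the repository layout.
#
#     from nexus.bot import handlers
#     modules = [getattr(handlers, name) for name in handlers.__all__]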
| 18.071429
| 98
| 0.51581
|
from . import (
admin,
ban,
close,
contact,
copyright,
donate,
download,
emoji,
help,
legacy,
noop,
roll,
search,
settings,
shortlink,
start,
stop,
submit,
top_missed,
view,
vote,
)
__all__ = ['admin', 'ban', 'contact', 'copyright', 'close', 'donate', 'download', 'emoji', 'help',
'legacy', 'noop', 'roll', 'search', 'settings',
'shortlink', 'start', 'stop', 'submit', 'top_missed', 'view', 'vote']
| true
| true
|
79072f542ccf13bca8fa1c484ef91e52bfb5242f
| 5,959
|
py
|
Python
|
malss/app/learning_curve.py
|
canard0328/malss
|
976ebdb6e4bee52a0dbb65e0ddeed767cfe39591
|
[
"MIT"
] | 37
|
2015-02-22T20:12:20.000Z
|
2021-02-05T11:12:28.000Z
|
malss/app/learning_curve.py
|
canard0328/malss
|
976ebdb6e4bee52a0dbb65e0ddeed767cfe39591
|
[
"MIT"
] | 8
|
2015-01-07T14:53:41.000Z
|
2018-02-11T08:00:19.000Z
|
malss/app/learning_curve.py
|
canard0328/malss
|
976ebdb6e4bee52a0dbb65e0ddeed767cfe39591
|
[
"MIT"
] | 7
|
2015-01-08T14:53:26.000Z
|
2020-07-26T13:03:10.000Z
|
# coding: utf-8
import os
import numpy as np
import copy
from PyQt5.QtWidgets import (QPushButton, QScrollArea)
from PyQt5.QtCore import QThread, pyqtSignal
from multiprocessing import Process, Manager
from ..malss import MALSS
from .waiting_animation import WaitingAnimation
from .rfpimp import oob_importances
from .learning_curve_base import LearningCurveBase
class LearningCurve(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'check_curve'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results['algorithms'])
self.vbox.addStretch()
btn_fs = QPushButton('Try feature selection', self.inner)
btn_fs.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
btn_fs.clicked.connect(self.__button_clicked)
self.btn_next = QPushButton('Continue', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(btn_fs)
self.vbox.addWidget(self.btn_next)
# "parent.parent()" must be modified.
self.wait_ani = WaitingAnimation(parent.parent())
self.wait_ani.hide()
lists = ['task', 'supervised_learning', 'dummy', 'hyperparameter',
'overfitting', 'cross_validation', 'learning_curve',
'bias_variance']
if self.params.lang == 'jp':
lists = [l + '_jp' for l in lists]
else:
lists = [l + '_en' for l in lists]
self.wait_ani.set_lists(lists)
def resizeEvent(self, event):
# To be modified.
self.wait_ani.resize(self.parent().parent().size())
event.accept()
QScrollArea.resizeEvent(self, event)
def __button_clicked(self):
self.__feature_selection()
def __feature_selection(self):
self.mdl_fs = copy.deepcopy(self.params.mdl)
self.thread = FeatureSelectionWorker(self.mdl_fs)
self.thread.finSignal.connect(self.__feature_selected)
self.thread.start()
self.wait_ani.show()
def __feature_selected(self, signalData):
self.wait_ani.hide()
if 'error' in signalData:
self.params.error = signalData['error']
self.button_func('Error')
else:
if len(signalData['mdl'].data.X.columns) < len(self.params.X.columns):
# some features deleted
self.params.X_fs = signalData['mdl'].data.X
self.params.mdl_fs = signalData['mdl']
self.params.algorithms_fs = self.params.mdl_fs.get_algorithms()
if self.params.lang == 'en':
self.button_func('Feature selection')
else:
self.button_func('特徴量選択')
else:
# no features deleted
self.params.not_deleted = True
if self.params.lang == 'en':
self.button_func('Prediction')
else:
self.button_func('予測')
class LearningCurve2(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve 2', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'learning_curve_2'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results_fs['algorithms'])
if self.params.lang == 'en':
text = ('Finally, MALSS output analysis results, and you can '
'predict unknown data (if you have).\n'
'Press "Next" to continue.')
self.set_paragraph('', text=text)
else:
text = ('最後に学習結果の出力と,未知データがあればその予測を'
'行いましょう.\nNextを押してください')
self.set_paragraph('', text=text)
self.vbox.addStretch()
self.btn_next = QPushButton('Next', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(self.btn_next)
class FeatureSelectionWorker(QThread):
finSignal = pyqtSignal(dict)
def __init__(self, mdl):
super().__init__()
self.mdl = mdl
def run(self):
with Manager() as manager:
d = manager.dict()
job = Process(target=FeatureSelectionWorker.sub_job,
args=(self.mdl, d))
job.start()
job.join()
self.finSignal.emit(dict(d))
@staticmethod
def sub_job(mdl, d):
try:
mdl.select_features()
d['mdl'] = mdl
except Exception as e:
import traceback
d['error'] = traceback.format_exc()
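# Minimal standalone sketch of the pattern FeatureSelectionWorker uses above
# (illustrative, not part of malss): run a job in a child process and pass the
# result -- or a formatted traceback -- back through a Manager dict, so a crash
# in the job cannot take down the parent (GUI) process.
def _sketch_job(d):
    try:
        d['result'] = sum(i * i for i in range(10))  # stand-in for select_features()
    except Exception:
        import traceback
        d['error'] = traceback.format_exc()

if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        p = Process(target=_sketch_job, args=(d,))
        p.start()
        p.join()
        print(dict(d))  # {'result': 285} on success, {'error': '...'} on failure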
| 34.847953
| 107
| 0.5756
|
import os
import numpy as np
import copy
from PyQt5.QtWidgets import (QPushButton, QScrollArea)
from PyQt5.QtCore import QThread, pyqtSignal
from multiprocessing import Process, Manager
from ..malss import MALSS
from .waiting_animation import WaitingAnimation
from .rfpimp import oob_importances
from .learning_curve_base import LearningCurveBase
class LearningCurve(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'check_curve'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results['algorithms'])
self.vbox.addStretch()
btn_fs = QPushButton('Try feature selection', self.inner)
btn_fs.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
btn_fs.clicked.connect(self.__button_clicked)
self.btn_next = QPushButton('Continue', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(btn_fs)
self.vbox.addWidget(self.btn_next)
self.wait_ani = WaitingAnimation(parent.parent())
self.wait_ani.hide()
lists = ['task', 'supervised_learning', 'dummy', 'hyperparameter',
'overfitting', 'cross_validation', 'learning_curve',
'bias_variance']
if self.params.lang == 'jp':
lists = [l + '_jp' for l in lists]
else:
lists = [l + '_en' for l in lists]
self.wait_ani.set_lists(lists)
def resizeEvent(self, event):
self.wait_ani.resize(self.parent().parent().size())
event.accept()
QScrollArea.resizeEvent(self, event)
def __button_clicked(self):
self.__feature_selection()
def __feature_selection(self):
self.mdl_fs = copy.deepcopy(self.params.mdl)
self.thread = FeatureSelectionWorker(self.mdl_fs)
self.thread.finSignal.connect(self.__feature_selected)
self.thread.start()
self.wait_ani.show()
def __feature_selected(self, signalData):
self.wait_ani.hide()
if 'error' in signalData:
self.params.error = signalData['error']
self.button_func('Error')
else:
if len(signalData['mdl'].data.X.columns) < len(self.params.X.columns):
self.params.X_fs = signalData['mdl'].data.X
self.params.mdl_fs = signalData['mdl']
self.params.algorithms_fs = self.params.mdl_fs.get_algorithms()
if self.params.lang == 'en':
self.button_func('Feature selection')
else:
self.button_func('特徴量選択')
else:
self.params.not_deleted = True
if self.params.lang == 'en':
self.button_func('Prediction')
else:
self.button_func('予測')
class LearningCurve2(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve 2', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'learning_curve_2'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results_fs['algorithms'])
if self.params.lang == 'en':
text = ('Finally, MALSS output analysis results, and you can '
'predict unknown data (if you have).\n'
'Press "Next" to continue.')
self.set_paragraph('', text=text)
else:
text = ('最後に学習結果の出力と,未知データがあればその予測を'
'行いましょう.\nNextを押してください')
self.set_paragraph('', text=text)
self.vbox.addStretch()
self.btn_next = QPushButton('Next', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(self.btn_next)
class FeatureSelectionWorker(QThread):
finSignal = pyqtSignal(dict)
def __init__(self, mdl):
super().__init__()
self.mdl = mdl
def run(self):
with Manager() as manager:
d = manager.dict()
job = Process(target=FeatureSelectionWorker.sub_job,
args=(self.mdl, d))
job.start()
job.join()
self.finSignal.emit(dict(d))
@staticmethod
def sub_job(mdl, d):
try:
mdl.select_features()
d['mdl'] = mdl
except Exception as e:
import traceback
d['error'] = traceback.format_exc()
| true
| true
|
79072fc503eadda6e5ae8defe25b3a7ba294b2e8
| 455
|
py
|
Python
|
setup_python_package/queries/get_package_author_name.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | 5
|
2019-09-17T14:46:35.000Z
|
2020-06-06T08:17:02.000Z
|
setup_python_package/queries/get_package_author_name.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | 2
|
2020-12-18T01:47:55.000Z
|
2020-12-25T10:08:30.000Z
|
setup_python_package/queries/get_package_author_name.py
|
LucaCappelletti94/setup_python_package
|
61b5f3cff1ed3181f932293c63c4fcb71cbe0062
|
[
"MIT"
] | null | null | null |
from userinput import userinput
from ..utils import load_repository_author_name
def get_package_author_name() -> str:
"""Return the package author name to be used."""
return userinput(
name="python_package_author_name",
label="Enter the python package author name to use.",
default=load_repository_author_name(),
validator="non_empty",
sanitizer=[
"strip"
],
cache=False
)
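# Hedged usage note (not in the original file): callers elsewhere in
# setup_python_package presumably use this while assembling setup() metadata,
# e.g.
#
#     author = get_package_author_name()  # prompts, with a repository-derived default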
| 26.764706
| 61
| 0.648352
|
from userinput import userinput
from ..utils import load_repository_author_name
def get_package_author_name() -> str:
return userinput(
name="python_package_author_name",
label="Enter the python package author name to use.",
default=load_repository_author_name(),
validator="non_empty",
sanitizer=[
"strip"
],
cache=False
)
| true
| true
|
79073053df7e3eef7c63daa9c208a2a275f12015
| 14,067
|
py
|
Python
|
Lib/site-packages/PyQt5/examples/opengl/grabber.py
|
dipivan/my-first-blog
|
07c2b7ba631c747ac85bbd32fcedb9305474b7b8
|
[
"bzip2-1.0.6"
] | 2
|
2020-11-09T23:56:54.000Z
|
2021-07-29T23:15:59.000Z
|
PyQt5_gpl-5.8/examples/opengl/grabber.py
|
ArjandeV/iracing-overlay
|
6286348d78f1538f64928ec867cafc65124eea3d
|
[
"MIT"
] | null | null | null |
PyQt5_gpl-5.8/examples/opengl/grabber.py
|
ArjandeV/iracing-overlay
|
6286348d78f1538f64928ec867cafc65124eea3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2015 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
import math
from PyQt5.QtCore import pyqtSignal, QSize, Qt, QTimer
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QAction, QApplication, QGridLayout, QLabel,
QLineEdit, QMainWindow, QMessageBox, QOpenGLWidget, QScrollArea,
QSizePolicy, QSlider, QWidget)
class GLWidget(QOpenGLWidget):
xRotationChanged = pyqtSignal(int)
yRotationChanged = pyqtSignal(int)
zRotationChanged = pyqtSignal(int)
def __init__(self, parent=None):
super(GLWidget, self).__init__(parent)
self.gear1 = 0
self.gear2 = 0
self.gear3 = 0
self.xRot = 0
self.yRot = 0
self.zRot = 0
self.gear1Rot = 0
timer = QTimer(self)
timer.timeout.connect(self.advanceGears)
timer.start(20)
def setXRotation(self, angle):
        angle = self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
self.update()
def setYRotation(self, angle):
        angle = self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
self.update()
def setZRotation(self, angle):
        angle = self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
self.update()
def initializeGL(self):
self.gl = self.context().versionFunctions()
self.gl.initializeOpenGLFunctions()
lightPos = (5.0, 5.0, 10.0, 1.0)
reflectance1 = (0.8, 0.1, 0.0, 1.0)
reflectance2 = (0.0, 0.8, 0.2, 1.0)
reflectance3 = (0.2, 0.2, 1.0, 1.0)
self.gl.glLightfv(self.gl.GL_LIGHT0, self.gl.GL_POSITION, lightPos)
self.gl.glEnable(self.gl.GL_LIGHTING)
self.gl.glEnable(self.gl.GL_LIGHT0)
self.gl.glEnable(self.gl.GL_DEPTH_TEST)
self.gear1 = self.makeGear(reflectance1, 1.0, 4.0, 1.0, 0.7, 20)
self.gear2 = self.makeGear(reflectance2, 0.5, 2.0, 2.0, 0.7, 10)
self.gear3 = self.makeGear(reflectance3, 1.3, 2.0, 0.5, 0.7, 10)
self.gl.glEnable(self.gl.GL_NORMALIZE)
self.gl.glClearColor(0.0, 0.0, 0.0, 1.0)
def paintGL(self):
self.gl.glClear(self.gl.GL_COLOR_BUFFER_BIT | self.gl.GL_DEPTH_BUFFER_BIT)
self.gl.glPushMatrix()
self.gl.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
self.gl.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
self.gl.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
self.drawGear(self.gear1, -3.0, -2.0, 0.0, self.gear1Rot / 16.0)
self.drawGear(self.gear2, +3.1, -2.0, 0.0,
-2.0 * (self.gear1Rot / 16.0) - 9.0)
self.gl.glRotated(+90.0, 1.0, 0.0, 0.0)
self.drawGear(self.gear3, -3.1, -1.8, -2.2,
+2.0 * (self.gear1Rot / 16.0) - 2.0)
self.gl.glPopMatrix()
def resizeGL(self, width, height):
side = min(width, height)
if side < 0:
return
self.gl.glViewport((width - side) // 2, (height - side) // 2, side, side)
self.gl.glMatrixMode(self.gl.GL_PROJECTION)
self.gl.glLoadIdentity()
self.gl.glFrustum(-1.0, +1.0, -1.0, 1.0, 5.0, 60.0)
self.gl.glMatrixMode(self.gl.GL_MODELVIEW)
self.gl.glLoadIdentity()
self.gl.glTranslated(0.0, 0.0, -40.0)
def mousePressEvent(self, event):
self.lastPos = event.pos()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & Qt.LeftButton:
self.setXRotation(self.xRot + 8 * dy)
self.setYRotation(self.yRot + 8 * dx)
elif event.buttons() & Qt.RightButton:
self.setXRotation(self.xRot + 8 * dy)
self.setZRotation(self.zRot + 8 * dx)
self.lastPos = event.pos()
def advanceGears(self):
self.gear1Rot += 2 * 16
self.update()
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def makeGear(self, reflectance, innerRadius, outerRadius, thickness, toothSize, toothCount):
list = self.gl.glGenLists(1)
self.gl.glNewList(list, self.gl.GL_COMPILE)
self.gl.glMaterialfv(self.gl.GL_FRONT, self.gl.GL_AMBIENT_AND_DIFFUSE,
reflectance)
r0 = innerRadius
r1 = outerRadius - toothSize / 2.0
r2 = outerRadius + toothSize / 2.0
delta = (2.0 * math.pi / toothCount) / 4.0
z = thickness / 2.0
self.gl.glShadeModel(self.gl.GL_FLAT)
for i in range(2):
if i == 0:
sign = +1.0
else:
sign = -1.0
self.gl.glNormal3d(0.0, 0.0, sign)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for j in range(toothCount+1):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUADS)
for j in range(toothCount):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + delta), r2 * math.sin(angle + delta), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + 2 * delta), r2 * math.sin(angle + 2 * delta), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount):
for j in range(2):
angle = 2.0 * math.pi * (i + (j / 2.0)) / toothCount
s1 = r1
s2 = r2
if j == 1:
s1, s2 = s2, s1
self.gl.glNormal3d(math.cos(angle), math.sin(angle), 0.0)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), +z)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), -z)
self.gl.glNormal3d(s2 * math.sin(angle + delta) - s1 * math.sin(angle), s1 * math.cos(angle) - s2 * math.cos(angle + delta), 0.0)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), +z)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), -z)
self.gl.glVertex3d(r1, 0.0, +z)
self.gl.glVertex3d(r1, 0.0, -z)
self.gl.glEnd()
self.gl.glShadeModel(self.gl.GL_SMOOTH)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount+1):
angle = i * 2.0 * math.pi / toothCount
self.gl.glNormal3d(-math.cos(angle), -math.sin(angle), 0.0)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), +z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), -z)
self.gl.glEnd()
self.gl.glEndList()
return list
def drawGear(self, gear, dx, dy, dz, angle):
self.gl.glPushMatrix()
self.gl.glTranslated(dx, dy, dz)
self.gl.glRotated(angle, 0.0, 0.0, 1.0)
self.gl.glCallList(gear)
self.gl.glPopMatrix()
    def normalizeAngle(self, angle):
        while (angle < 0):
            angle += 360 * 16
        while (angle > 360 * 16):
            angle -= 360 * 16
        return angle
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
centralWidget = QWidget()
self.setCentralWidget(centralWidget)
self.glWidget = GLWidget()
self.pixmapLabel = QLabel()
self.glWidgetArea = QScrollArea()
self.glWidgetArea.setWidget(self.glWidget)
self.glWidgetArea.setWidgetResizable(True)
self.glWidgetArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.glWidgetArea.setMinimumSize(50, 50)
self.pixmapLabelArea = QScrollArea()
self.pixmapLabelArea.setWidget(self.pixmapLabel)
self.pixmapLabelArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.pixmapLabelArea.setMinimumSize(50, 50)
xSlider = self.createSlider(self.glWidget.xRotationChanged,
self.glWidget.setXRotation)
ySlider = self.createSlider(self.glWidget.yRotationChanged,
self.glWidget.setYRotation)
zSlider = self.createSlider(self.glWidget.zRotationChanged,
self.glWidget.setZRotation)
self.createActions()
self.createMenus()
centralLayout = QGridLayout()
centralLayout.addWidget(self.glWidgetArea, 0, 0)
centralLayout.addWidget(self.pixmapLabelArea, 0, 1)
centralLayout.addWidget(xSlider, 1, 0, 1, 2)
centralLayout.addWidget(ySlider, 2, 0, 1, 2)
centralLayout.addWidget(zSlider, 3, 0, 1, 2)
centralWidget.setLayout(centralLayout)
xSlider.setValue(15 * 16)
ySlider.setValue(345 * 16)
zSlider.setValue(0 * 16)
self.setWindowTitle("Grabber")
self.resize(400, 300)
def grabFrameBuffer(self):
image = self.glWidget.grabFramebuffer()
self.setPixmap(QPixmap.fromImage(image))
def clearPixmap(self):
self.setPixmap(QPixmap())
def about(self):
QMessageBox.about(self, "About Grabber",
"The <b>Grabber</b> example demonstrates two approaches for "
"rendering OpenGL into a Qt pixmap.")
def createActions(self):
self.grabFrameBufferAct = QAction("&Grab Frame Buffer", self,
shortcut="Ctrl+G", triggered=self.grabFrameBuffer)
self.clearPixmapAct = QAction("&Clear Pixmap", self,
shortcut="Ctrl+L", triggered=self.clearPixmap)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.aboutAct = QAction("&About", self, triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
triggered=QApplication.instance().aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.grabFrameBufferAct)
self.fileMenu.addAction(self.clearPixmapAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createSlider(self, changedSignal, setterSlot):
slider = QSlider(Qt.Horizontal)
slider.setRange(0, 360 * 16)
slider.setSingleStep(16)
slider.setPageStep(15 * 16)
slider.setTickInterval(15 * 16)
slider.setTickPosition(QSlider.TicksRight)
slider.valueChanged.connect(setterSlot)
changedSignal.connect(slider.setValue)
return slider
def setPixmap(self, pixmap):
self.pixmapLabel.setPixmap(pixmap)
size = pixmap.size()
if size - QSize(1, 0) == self.pixmapLabelArea.maximumViewportSize():
size -= QSize(1, 0)
self.pixmapLabel.resize(size)
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
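# Hedged aside (not in the original example): a grabbed frame can also be
# written straight to disk, since QPixmap.save infers the format from the
# file suffix:
#
#     pixmap = QPixmap.fromImage(self.glWidget.grabFramebuffer())
#     pixmap.save('frame.png')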
| 35.522727
| 145
| 0.603967
|
if i == 0:
sign = +1.0
else:
sign = -1.0
self.gl.glNormal3d(0.0, 0.0, sign)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for j in range(toothCount+1):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUADS)
for j in range(toothCount):
angle = 2.0 * math.pi * j / toothCount
self.gl.glVertex3d(r1 * math.cos(angle), r1 * math.sin(angle), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + delta), r2 * math.sin(angle + delta), sign * z)
self.gl.glVertex3d(r2 * math.cos(angle + 2 * delta), r2 * math.sin(angle + 2 * delta), sign * z)
self.gl.glVertex3d(r1 * math.cos(angle + 3 * delta), r1 * math.sin(angle + 3 * delta), sign * z)
self.gl.glEnd()
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount):
for j in range(2):
angle = 2.0 * math.pi * (i + (j / 2.0)) / toothCount
s1 = r1
s2 = r2
if j == 1:
s1, s2 = s2, s1
self.gl.glNormal3d(math.cos(angle), math.sin(angle), 0.0)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), +z)
self.gl.glVertex3d(s1 * math.cos(angle), s1 * math.sin(angle), -z)
self.gl.glNormal3d(s2 * math.sin(angle + delta) - s1 * math.sin(angle), s1 * math.cos(angle) - s2 * math.cos(angle + delta), 0.0)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), +z)
self.gl.glVertex3d(s2 * math.cos(angle + delta), s2 * math.sin(angle + delta), -z)
self.gl.glVertex3d(r1, 0.0, +z)
self.gl.glVertex3d(r1, 0.0, -z)
self.gl.glEnd()
self.gl.glShadeModel(self.gl.GL_SMOOTH)
self.gl.glBegin(self.gl.GL_QUAD_STRIP)
for i in range(toothCount+1):
angle = i * 2.0 * math.pi / toothCount
self.gl.glNormal3d(-math.cos(angle), -math.sin(angle), 0.0)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), +z)
self.gl.glVertex3d(r0 * math.cos(angle), r0 * math.sin(angle), -z)
self.gl.glEnd()
self.gl.glEndList()
return list
def drawGear(self, gear, dx, dy, dz, angle):
self.gl.glPushMatrix()
self.gl.glTranslated(dx, dy, dz)
self.gl.glRotated(angle, 0.0, 0.0, 1.0)
self.gl.glCallList(gear)
self.gl.glPopMatrix()
    def normalizeAngle(self, angle):
        while (angle < 0):
            angle += 360 * 16
        while (angle > 360 * 16):
            angle -= 360 * 16
        return angle
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
centralWidget = QWidget()
self.setCentralWidget(centralWidget)
self.glWidget = GLWidget()
self.pixmapLabel = QLabel()
self.glWidgetArea = QScrollArea()
self.glWidgetArea.setWidget(self.glWidget)
self.glWidgetArea.setWidgetResizable(True)
self.glWidgetArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.glWidgetArea.setMinimumSize(50, 50)
self.pixmapLabelArea = QScrollArea()
self.pixmapLabelArea.setWidget(self.pixmapLabel)
self.pixmapLabelArea.setSizePolicy(QSizePolicy.Ignored,
QSizePolicy.Ignored)
self.pixmapLabelArea.setMinimumSize(50, 50)
xSlider = self.createSlider(self.glWidget.xRotationChanged,
self.glWidget.setXRotation)
ySlider = self.createSlider(self.glWidget.yRotationChanged,
self.glWidget.setYRotation)
zSlider = self.createSlider(self.glWidget.zRotationChanged,
self.glWidget.setZRotation)
self.createActions()
self.createMenus()
centralLayout = QGridLayout()
centralLayout.addWidget(self.glWidgetArea, 0, 0)
centralLayout.addWidget(self.pixmapLabelArea, 0, 1)
centralLayout.addWidget(xSlider, 1, 0, 1, 2)
centralLayout.addWidget(ySlider, 2, 0, 1, 2)
centralLayout.addWidget(zSlider, 3, 0, 1, 2)
centralWidget.setLayout(centralLayout)
xSlider.setValue(15 * 16)
ySlider.setValue(345 * 16)
zSlider.setValue(0 * 16)
self.setWindowTitle("Grabber")
self.resize(400, 300)
def grabFrameBuffer(self):
image = self.glWidget.grabFramebuffer()
self.setPixmap(QPixmap.fromImage(image))
def clearPixmap(self):
self.setPixmap(QPixmap())
def about(self):
QMessageBox.about(self, "About Grabber",
"The <b>Grabber</b> example demonstrates two approaches for "
"rendering OpenGL into a Qt pixmap.")
def createActions(self):
self.grabFrameBufferAct = QAction("&Grab Frame Buffer", self,
shortcut="Ctrl+G", triggered=self.grabFrameBuffer)
self.clearPixmapAct = QAction("&Clear Pixmap", self,
shortcut="Ctrl+L", triggered=self.clearPixmap)
self.exitAct = QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.aboutAct = QAction("&About", self, triggered=self.about)
self.aboutQtAct = QAction("About &Qt", self,
triggered=QApplication.instance().aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.grabFrameBufferAct)
self.fileMenu.addAction(self.clearPixmapAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createSlider(self, changedSignal, setterSlot):
slider = QSlider(Qt.Horizontal)
slider.setRange(0, 360 * 16)
slider.setSingleStep(16)
slider.setPageStep(15 * 16)
slider.setTickInterval(15 * 16)
slider.setTickPosition(QSlider.TicksRight)
slider.valueChanged.connect(setterSlot)
changedSignal.connect(slider.setValue)
return slider
def setPixmap(self, pixmap):
self.pixmapLabel.setPixmap(pixmap)
size = pixmap.size()
if size - QSize(1, 0) == self.pixmapLabelArea.maximumViewportSize():
size -= QSize(1, 0)
self.pixmapLabel.resize(size)
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
| true
| true
|
7907335812e378d83c592760a69e195c81a6ff01
| 868
|
py
|
Python
|
components/fighter.py
|
StormCloud71/tdl-roguelike-tute
|
d43765b0cff5123b72d4d9aaa87ee174c3562162
|
[
"CNRI-Python"
] | null | null | null |
components/fighter.py
|
StormCloud71/tdl-roguelike-tute
|
d43765b0cff5123b72d4d9aaa87ee174c3562162
|
[
"CNRI-Python"
] | null | null | null |
components/fighter.py
|
StormCloud71/tdl-roguelike-tute
|
d43765b0cff5123b72d4d9aaa87ee174c3562162
|
[
"CNRI-Python"
] | null | null | null |
class Fighter:
def __init__(self, hp, defense, power):
self.max_hp = hp
self.hp = hp
self.defense = defense
self.power = power
def take_damage(self, amount):
        results = []
        self.hp -= amount
        if self.hp <= 0:
            results.append({'dead': self.owner})
return results
def attack(self, target):
        results = []
damage = self.power - target.fighter.defense
if damage > 0:
results.append({'message': '{0} attacks {1} for {2} hit points.'.format(
self.owner.name.capitalize(), target.name, str(damage))})
results.extend(target.fighter.take_damage(damage))
else:
results.append({'message': '{0} attacks {1} but does no damage.'.format(
self.owner.name.capitalize(), target.name)})
return results
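# Hedged usage sketch (not in the tutorial file): in the tutorial an Entity
# object assigns fighter.owner; a minimal stand-in is used here instead.
if __name__ == '__main__':
    class _Entity:
        def __init__(self, name, fighter):
            self.name = name
            self.fighter = fighter
            fighter.owner = self

    orc = _Entity('orc', Fighter(hp=10, defense=0, power=3))
    player = _Entity('player', Fighter(hp=30, defense=2, power=5))
    for result in player.fighter.attack(orc):
        print(result.get('message', result))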
| 33.384615
| 84
| 0.56106
|
class Fighter:
def __init__(self, hp, defense, power):
self.max_hp = hp
self.hp = hp
self.defense = defense
self.power = power
def take_damage(self, amount):
        results = []
        self.hp -= amount
        if self.hp <= 0:
            results.append({'dead': self.owner})
return results
def attack(self, target):
        results = []
damage = self.power - target.fighter.defense
if damage > 0:
results.append({'message': '{0} attacks {1} for {2} hit points.'.format(
self.owner.name.capitalize(), target.name, str(damage))})
results.extend(target.fighter.take_damage(damage))
else:
results.append({'message': '{0} attacks {1} but does no damage.'.format(
self.owner.name.capitalize(), target.name)})
return results
| true
| true
|