code stringlengths 20 1.04M | apis sequence | extract_api stringlengths 75 9.94M |
|---|---|---|
import os
import pytest
# No CLI test due to sc-show
@pytest.mark.eda
@pytest.mark.quick
def test_py(setup_example_test):
    """Run the 'fibone' example end-to-end and check that the final GDS exists.

    `setup_example_test` is a fixture that copies the example project and
    makes its module importable; `fibone.main()` executes the full flow.
    """
    setup_example_test('fibone')
    import fibone
    fibone.main()
    # On success the flow writes its exported layout to this fixed path.
    assert os.path.isfile('build/mkFibOne/job0/export/0/outputs/mkFibOne.gds')
| [
"fibone.main",
"os.path.isfile"
] | [((181, 194), 'fibone.main', 'fibone.main', ([], {}), '()\n', (192, 194), False, 'import fibone\n'), ((207, 274), 'os.path.isfile', 'os.path.isfile', (['"""build/mkFibOne/job0/export/0/outputs/mkFibOne.gds"""'], {}), "('build/mkFibOne/job0/export/0/outputs/mkFibOne.gds')\n", (221, 274), False, 'import os\n')] |
from setuptools import setup
from setuptools.command.develop import develop
# Minimal packaging config: distributes the single top-level `retroprime` module.
setup(name='retroprime',
      py_modules=['retroprime']) | [
"setuptools.setup"
] | [((78, 129), 'setuptools.setup', 'setup', ([], {'name': '"""retroprime"""', 'py_modules': "['retroprime']"}), "(name='retroprime', py_modules=['retroprime'])\n", (83, 129), False, 'from setuptools import setup\n')] |
# import the necessary packages
from tensorflow.keras.models import load_model
from image_classification.data import DataDispatcher
from image_classification.utils import config
from image_classification.layers import Mish
import numpy as np
import argparse
# construct an argument parser to parse the command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weights", required=True, help="path to model weights")
args = vars(ap.parse_args())
# load the dataset
dd = DataDispatcher()
test_ds = dd.get_test_data()
# load the model
# Mish is registered via custom_objects so Keras can deserialize the custom layer.
model = load_model(args["weights"], custom_objects={"Mish": Mish})
# model = load_model(args["weights"])
# evaluate the model
# number of batches needed to cover the whole test set at batch size config.BS
test_steps = np.ceil(dd.num_test_imgs / config.BS)
H = model.evaluate(x=test_ds, batch_size=config.BS, steps=test_steps)
# print the results
# H holds the evaluation metrics; H[0]/H[1] are used as loss/accuracy here.
print(f"[INFO] test set loss: {np.round(H[0], 4)}")
print(f"[INFO] test set acc: {np.round(H[1], 4)}")
| [
"tensorflow.keras.models.load_model",
"image_classification.data.DataDispatcher",
"numpy.ceil",
"argparse.ArgumentParser",
"numpy.round"
] | [((331, 356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (354, 356), False, 'import argparse\n'), ((491, 507), 'image_classification.data.DataDispatcher', 'DataDispatcher', ([], {}), '()\n', (505, 507), False, 'from image_classification.data import DataDispatcher\n'), ((563, 621), 'tensorflow.keras.models.load_model', 'load_model', (["args['weights']"], {'custom_objects': "{'Mish': Mish}"}), "(args['weights'], custom_objects={'Mish': Mish})\n", (573, 621), False, 'from tensorflow.keras.models import load_model\n'), ((695, 732), 'numpy.ceil', 'np.ceil', (['(dd.num_test_imgs / config.BS)'], {}), '(dd.num_test_imgs / config.BS)\n', (702, 732), True, 'import numpy as np\n'), ((855, 872), 'numpy.round', 'np.round', (['H[0]', '(4)'], {}), '(H[0], 4)\n', (863, 872), True, 'import numpy as np\n'), ((906, 923), 'numpy.round', 'np.round', (['H[1]', '(4)'], {}), '(H[1], 4)\n', (914, 923), True, 'import numpy as np\n')] |
import folium
from pathlib import Path
from sportgems import parse_fit_data, find_fastest_section
# desired fastest sections to parse, note larges must come first in
# order to be able to render the smaller sections on top of the larger ones
sections = [5000, 3000, 2000, 1000]
colors = ["yellow", "blue", "green", "red"]
if __name__ == "__main__":
    # Path to a sample FIT activity file shipped with the test data.
    fit_file = Path(".").parent / "tests" / "data" / "2019-09-14-17-22-05.fit"
    fit_data = parse_fit_data(str(fit_file))
    coords = []
    # Keep only plausible GPS fixes; (0, 0) / negative pairs are treated as missing.
    for coordinate in fit_data.coordinates:
        if coordinate[0] > 0 and coordinate[1] > 0:
            coords.append((coordinate[0], coordinate[1]))
    # Full activity track drawn in black underneath the highlighted sections.
    trace = folium.PolyLine(coords, color="black")
    map = folium.Map(location=fit_data.coordinates[300], zoom_start=15)
    trace.add_to(map)
    # Overlay each fastest section; larger sections were listed first so the
    # smaller ones render on top.
    for i in range(len(sections)):
        fs = find_fastest_section(sections[i], fit_data.times, fit_data.coordinates)
        fs_coords = coords[fs.start:fs.end]
        fs_poly = folium.PolyLine(fs_coords, color=colors[i])
        fs_poly.add_to(map)
    output_file = "map.html"
    map.save(output_file)
    print(f"saved map to {output_file}, can be viewed in browser") | [
"folium.PolyLine",
"pathlib.Path",
"sportgems.find_fastest_section",
"folium.Map"
] | [((658, 696), 'folium.PolyLine', 'folium.PolyLine', (['coords'], {'color': '"""black"""'}), "(coords, color='black')\n", (673, 696), False, 'import folium\n'), ((707, 768), 'folium.Map', 'folium.Map', ([], {'location': 'fit_data.coordinates[300]', 'zoom_start': '(15)'}), '(location=fit_data.coordinates[300], zoom_start=15)\n', (717, 768), False, 'import folium\n'), ((840, 911), 'sportgems.find_fastest_section', 'find_fastest_section', (['sections[i]', 'fit_data.times', 'fit_data.coordinates'], {}), '(sections[i], fit_data.times, fit_data.coordinates)\n', (860, 911), False, 'from sportgems import parse_fit_data, find_fastest_section\n'), ((974, 1017), 'folium.PolyLine', 'folium.PolyLine', (['fs_coords'], {'color': 'colors[i]'}), '(fs_coords, color=colors[i])\n', (989, 1017), False, 'import folium\n'), ((366, 375), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (370, 375), False, 'from pathlib import Path\n')] |
from flask import Flask, render_template, abort
from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel
app = Flask(__name__)
view_count = 0  # NOTE(review): never read in this module — possibly used elsewhere; kept for compatibility


@app.route("/")
def welcome():
    """Landing page: list all resorts."""
    resort_model = ResortModel()
    resorts = resort_model.get_all_resorts()
    return render_template("welcome.html", resorts=resorts)


@app.route("/detail/<text_id>")
def detail(text_id):
    """Detail page for one resort; 500 if lookup returns nothing, 404 on bad id."""
    try:
        resort_model = ResortModel()
        resort = resort_model.get_resort_by_id(text_id)
        if resort:
            return render_template("detail.html", resort=resort)
        else:
            abort(500)
    except IndexError:
        abort(404)


@app.route("/refresh")
def refresh():
    """Re-fetch forecasts from the external API and persist them."""
    rm = ResortModel()
    fm = ForecastModel()
    resorts = rm.get_all_resorts()
    fAPI = ForecastAPILoader()
    forecasts = fAPI.load_forecasts_from_api(resorts)
    # if the api call returns None, fail gracefully.
    if forecasts:
        fm.save_forecasts(forecasts)
        message = 'Updated forecasts'
    else:
        message = 'could not update forecasts'
    return render_template('refresh.html', message=message)


@app.route("/about")
def about():
    """About page showing all resorts and their count."""
    rm = ResortModel()
    resorts = rm.get_all_resorts()
    num_resorts = len(resorts)
    return render_template("about.html", resorts=resorts, num_resorts=num_resorts)


@app.errorhandler(404)
def page_not_found(error):
    return render_template('404.html', title='404 Not Found'), 404


@app.errorhandler(500)
def internal_server_error(error):
    # Bug fix: this handler was also named `page_not_found`, shadowing the
    # 404 handler's function at module level. Flask still registered both
    # handlers (registration happens at decoration time), but the duplicate
    # name was confusing and broke introspection; renamed for clarity.
    return render_template('404.html', title='Something went wrong'), 500
| [
"flask.Flask",
"fauxsnow.ForecastModel",
"flask.abort",
"fauxsnow.ForecastAPILoader",
"fauxsnow.ResortModel",
"flask.render_template"
] | [((122, 137), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (127, 137), False, 'from flask import Flask, render_template, abort\n'), ((207, 220), 'fauxsnow.ResortModel', 'ResortModel', ([], {}), '()\n', (218, 220), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((277, 325), 'flask.render_template', 'render_template', (['"""welcome.html"""'], {'resorts': 'resorts'}), "('welcome.html', resorts=resorts)\n", (292, 325), False, 'from flask import Flask, render_template, abort\n'), ((693, 706), 'fauxsnow.ResortModel', 'ResortModel', ([], {}), '()\n', (704, 706), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((716, 731), 'fauxsnow.ForecastModel', 'ForecastModel', ([], {}), '()\n', (729, 731), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((778, 797), 'fauxsnow.ForecastAPILoader', 'ForecastAPILoader', ([], {}), '()\n', (795, 797), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((1083, 1131), 'flask.render_template', 'render_template', (['"""refresh.html"""'], {'message': 'message'}), "('refresh.html', message=message)\n", (1098, 1131), False, 'from flask import Flask, render_template, abort\n'), ((1176, 1189), 'fauxsnow.ResortModel', 'ResortModel', ([], {}), '()\n', (1187, 1189), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((1267, 1338), 'flask.render_template', 'render_template', (['"""about.html"""'], {'resorts': 'resorts', 'num_resorts': 'num_resorts'}), "('about.html', resorts=resorts, num_resorts=num_resorts)\n", (1282, 1338), False, 'from flask import Flask, render_template, abort\n'), ((412, 425), 'fauxsnow.ResortModel', 'ResortModel', ([], {}), '()\n', (423, 425), False, 'from fauxsnow import ResortModel, ForecastAPILoader, ForecastModel\n'), ((1400, 1450), 'flask.render_template', 'render_template', (['"""404.html"""'], {'title': '"""404 Not Found"""'}), "('404.html', title='404 
Not Found')\n", (1415, 1450), False, 'from flask import Flask, render_template, abort\n'), ((1519, 1576), 'flask.render_template', 'render_template', (['"""404.html"""'], {'title': '"""Something went wrong"""'}), "('404.html', title='Something went wrong')\n", (1534, 1576), False, 'from flask import Flask, render_template, abort\n'), ((520, 565), 'flask.render_template', 'render_template', (['"""detail.html"""'], {'resort': 'resort'}), "('detail.html', resort=resort)\n", (535, 565), False, 'from flask import Flask, render_template, abort\n'), ((592, 602), 'flask.abort', 'abort', (['(500)'], {}), '(500)\n', (597, 602), False, 'from flask import Flask, render_template, abort\n'), ((634, 644), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (639, 644), False, 'from flask import Flask, render_template, abort\n')] |
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtWidgets import QToolButton
from PyQt5 import QtGui
import filedockstylesheet as style
class FolderButton(QToolButton):
    """Tool button representing one folder in the file dock.

    Shows a folder icon with the folder's base name underneath and forwards
    release events to the supplied `clicked` callback.
    """
    def __init__(self, folderNumber, layoutPosition, path, clicked, parent=None):
        super(FolderButton, self).__init__(parent)
        self.folderNumber = folderNumber
        self.layoutPosition = layoutPosition
        self.path = path
        # Display only the last path component as the button label.
        self.folderName = path.split("/")[-1]
        self.clickCount = 1
        # Object name is used by the dock stylesheet to target this widget.
        self.setObjectName('FolderButton')
        self.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        self.setIconSize(QSize(50,50))
        self.setIcon(QtGui.QIcon('assets/folderIcon.png'))
        self.setText(self.folderName)
        self.released.connect(clicked) | [
"PyQt5.QtGui.QIcon",
"PyQt5.QtCore.QSize"
] | [((621, 634), 'PyQt5.QtCore.QSize', 'QSize', (['(50)', '(50)'], {}), '(50, 50)\n', (626, 634), False, 'from PyQt5.QtCore import Qt, QSize\n'), ((656, 692), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""assets/folderIcon.png"""'], {}), "('assets/folderIcon.png')\n", (667, 692), False, 'from PyQt5 import QtGui\n')] |
import random
from typing import TypeVar, MutableSequence
T = TypeVar('T')


def sample_items_inplace(items: MutableSequence[T], sample_size: int, item_limit: int = None):
    """Move `sample_size` randomly chosen elements to the tail of `items`.

    Only the first `item_limit` positions (default: the whole list) are
    eligible for sampling. When the sample size equals the list length this
    is an in-place Fisher-Yates shuffle performed from the back.

    Raises ValueError when `item_limit` or `sample_size` is out of range,
    or when `sample_size` exceeds `item_limit`.
    """
    total = len(items)
    if item_limit is None:
        item_limit = total
    elif not 0 <= item_limit <= total:
        raise ValueError("Item limit is negative or larger than item list size")
    if not 0 <= sample_size <= total:
        raise ValueError("Sample size is negative or larger than items list")
    if sample_size > item_limit:
        raise ValueError("Sample size is greater than item limit")
    for offset in range(sample_size):
        # Pick uniformly from the not-yet-sampled prefix, then swap the pick
        # into the next slot from the back of the eligible region.
        pick = random.randrange(item_limit - offset)
        target = item_limit - offset - 1
        if target != pick:
            items[pick], items[target] = items[target], items[pick]
| [
"typing.TypeVar",
"random.randrange"
] | [((63, 75), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (70, 75), False, 'from typing import TypeVar, MutableSequence\n'), ((772, 804), 'random.randrange', 'random.randrange', (['(item_limit - i)'], {}), '(item_limit - i)\n', (788, 804), False, 'import random\n')] |
import re
def capture(input: str, regex: str, pattern_flags: int = 0, groupnum: int = 1, fail_gently: bool = False) -> str:
    """Search `input` for `regex` and return the text of capture group `groupnum`.

    When the pattern does not match, raises Warning unless `fail_gently`
    is True, in which case None is returned.
    """
    match = re.compile(regex, pattern_flags).search(input)
    if match is not None:
        return match.group(groupnum)
    if fail_gently:
        return None
    raise Warning(f'Attempt to match {regex} on {input} at group {groupnum} failed.')
def almost_equal(str1: str, str2: str) -> bool:
    """Loosely compare two strings, ignoring case, surrounding whitespace and
    any non-word characters. Two None values compare equal; None never equals
    an actual string."""
    if str1 is None or str2 is None:
        return str1 is None and str2 is None

    def _normalize(s: str) -> str:
        # Case-fold, trim, then strip everything that is not a word character.
        return re.sub(r'\W+', '', s.strip().casefold())

    return _normalize(str1) == _normalize(str2)
| [
"re.compile"
] | [((140, 172), 're.compile', 're.compile', (['regex', 'pattern_flags'], {}), '(regex, pattern_flags)\n', (150, 172), False, 'import re\n')] |
# -*- coding: utf-8 -*-
import io
import re
import demjson3
import pandas as pd
import requests
from zvt.api.utils import china_stock_code_to_id
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain import EtfStock, Etf
from zvt.recorders.consts import DEFAULT_SH_ETF_LIST_HEADER
from zvt.utils.time_utils import now_pd_timestamp
class ChinaETFListSpider(Recorder):
    """Spider that downloads the ETF lists of the Shanghai (SSE) and Shenzhen
    (SZSE) exchanges, plus each ETF's constituent stocks, and persists them."""
    data_schema = EtfStock
    def __init__(self, force_update=False, sleeping_time=10.0, provider='exchange') -> None:
        self.provider = provider
        super().__init__(force_update, sleeping_time)
    def run(self):
        # Fetch the Shanghai (SSE) ETF list
        url = 'http://query.sse.com.cn/commonQuery.do?sqlId=COMMON_SSE_ZQPZ_ETFLB_L_NEW'
        response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)
        response_dict = demjson3.decode(response.text)
        df = pd.DataFrame(response_dict.get('result', []))
        self.persist_etf_list(df, exchange='sh')
        self.logger.info('沪市 ETF 列表抓取完成...')
        # Fetch constituent stocks for the Shanghai ETFs
        self.download_sh_etf_component(df)
        self.logger.info('沪市 ETF 成分股抓取完成...')
        # Fetch the Shenzhen (SZSE) ETF list (served as an xlsx report)
        url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1945'
        response = requests.get(url)
        df = pd.read_excel(io.BytesIO(response.content), dtype=str)
        self.persist_etf_list(df, exchange='sz')
        self.logger.info('深市 ETF 列表抓取完成...')
        # Fetch constituent stocks for the Shenzhen ETFs
        self.download_sz_etf_component(df)
        self.logger.info('深市 ETF 成分股抓取完成...')
    def persist_etf_list(self, df: pd.DataFrame, exchange: str):
        # Normalize the exchange-specific columns to (code, name) and store
        # the list as Etf entities.
        if df is None:
            return
        df = df.copy()
        if exchange == 'sh':
            df = df[['FUND_ID', 'FUND_NAME']]
        elif exchange == 'sz':
            df = df[['证券代码', '证券简称']]
        df.columns = ['code', 'name']
        df['id'] = df['code'].apply(lambda code: f'etf_{exchange}_{code}')
        df['entity_id'] = df['id']
        df['exchange'] = exchange
        df['entity_type'] = 'etf'
        df['category'] = 'etf'
        df = df.dropna(axis=0, how='any')
        df = df.drop_duplicates(subset='id', keep='last')
        df_to_db(df=df, data_schema=Etf, provider=self.provider, force_update=False)
    def download_sh_etf_component(self, df: pd.DataFrame):
        """
        ETF_CLASS => 1. single-market ETF  2. cross-market ETF  3. cross-border ETF
                     5. bond ETF  6. gold ETF
        :param df: ETF list data
        :return: None
        """
        query_url = 'http://query.sse.com.cn/infodisplay/queryConstituentStockInfo.do?' \
                    'isPagination=false&type={}&etfClass={}'
        # Only single-market (1) and cross-market (2) ETFs are queried here.
        etf_df = df[(df['ETF_CLASS'] == '1') | (df['ETF_CLASS'] == '2')]
        etf_df = self.populate_sh_etf_type(etf_df)
        for _, etf in etf_df.iterrows():
            url = query_url.format(etf['ETF_TYPE'], etf['ETF_CLASS'])
            response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)
            response_dict = demjson3.decode(response.text)
            response_df = pd.DataFrame(response_dict.get('result', []))
            etf_code = etf['FUND_ID']
            etf_id = f'etf_sh_{etf_code}'
            response_df = response_df[['instrumentId', 'instrumentName']].copy()
            response_df.rename(columns={'instrumentId': 'stock_code', 'instrumentName': 'stock_name'}, inplace=True)
            response_df['entity_id'] = etf_id
            response_df['entity_type'] = 'etf'
            response_df['exchange'] = 'sh'
            response_df['code'] = etf_code
            response_df['name'] = etf['FUND_NAME']
            response_df['timestamp'] = now_pd_timestamp()
            response_df['stock_id'] = response_df['stock_code'].apply(lambda code: china_stock_code_to_id(code))
            response_df['id'] = response_df['stock_id'].apply(
                lambda x: f'{etf_id}_{x}')
            df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
            self.logger.info(f'{etf["FUND_NAME"]} - {etf_code} 成分股抓取完成...')
            self.sleep()
    def download_sz_etf_component(self, df: pd.DataFrame):
        # SZSE constituents are scraped from Sina's page for the ETF's
        # underlying (tracked) index.
        query_url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vII_NewestComponent/indexid/{}.phtml'
        self.parse_sz_etf_underlying_index(df)
        for _, etf in df.iterrows():
            underlying_index = etf['拟合指数']
            etf_code = etf['证券代码']
            # Empty index code means the ETF does not track an A-share index; skip it.
            if len(underlying_index) == 0:
                self.logger.info(f'{etf["证券简称"]} - {etf_code} 非 A 股市场指数,跳过...')
                continue
            url = query_url.format(underlying_index)
            response = requests.get(url)
            response.encoding = 'gbk'
            try:
                dfs = pd.read_html(response.text, header=1)
            except ValueError as error:
                self.logger.error(f'HTML parse error: {error}, response: {response.text}')
                continue
            # The constituents table is the 4th table on the page.
            if len(dfs) < 4:
                continue
            response_df = dfs[3].copy()
            response_df = response_df.dropna(axis=1, how='any')
            # Zero-pad stock codes to the canonical 6-digit form.
            response_df['品种代码'] = response_df['品种代码'].apply(lambda x: f'{x:06d}')
            etf_id = f'etf_sz_{etf_code}'
            response_df = response_df[['品种代码', '品种名称']].copy()
            response_df.rename(columns={'品种代码': 'stock_code', '品种名称': 'stock_name'}, inplace=True)
            response_df['entity_id'] = etf_id
            response_df['entity_type'] = 'etf'
            response_df['exchange'] = 'sz'
            response_df['code'] = etf_code
            response_df['name'] = etf['证券简称']
            response_df['timestamp'] = now_pd_timestamp()
            response_df['stock_id'] = response_df['stock_code'].apply(lambda code: china_stock_code_to_id(code))
            response_df['id'] = response_df['stock_id'].apply(
                lambda x: f'{etf_id}_{x}')
            df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
            self.logger.info(f'{etf["证券简称"]} - {etf_code} 成分股抓取完成...')
            self.sleep()
    @staticmethod
    def populate_sh_etf_type(df: pd.DataFrame):
        """
        Fill in the TYPE corresponding to each Shanghai ETF code.
        :param df: ETF list data
        :return: the list data with each ETF's TYPE populated
        """
        query_url = 'http://query.sse.com.cn/infodisplay/queryETFNewAllInfo.do?' \
                    'isPagination=false&type={}&pageHelp.pageSize=25'
        type_df = pd.DataFrame()
        for etf_class in [1, 2]:
            url = query_url.format(etf_class)
            response = requests.get(url, headers=DEFAULT_SH_ETF_LIST_HEADER)
            response_dict = demjson3.decode(response.text)
            response_df = pd.DataFrame(response_dict.get('result', []))
            response_df = response_df[['fundid1', 'etftype']]
            type_df = pd.concat([type_df, response_df])
        result_df = df.copy()
        # Sort both frames by fund code so the type column lines up row-by-row
        # before it is copied over.
        result_df = result_df.sort_values(by='FUND_ID').reset_index(drop=True)
        type_df = type_df.sort_values(by='fundid1').reset_index(drop=True)
        result_df['ETF_TYPE'] = type_df['etftype']
        return result_df
    @staticmethod
    def parse_sz_etf_underlying_index(df: pd.DataFrame):
        """
        Parse the code of the index each Shenzhen ETF tracks.
        :param df: ETF list data
        :return: the list data with the underlying index code extracted
        """
        def parse_index(text):
            # Extract the leading digit run (the index code); '' when absent.
            if len(text) == 0:
                return ''
            result = re.search(r"(\d+).*", text)
            if result is None:
                return ''
            else:
                return result.group(1)
        df['拟合指数'] = df['拟合指数'].apply(parse_index)
# NOTE(review): __all__ is re-assigned (with the identical value) by the
# generated block below this main guard.
__all__ = ['ChinaETFListSpider']
if __name__ == '__main__':
    # Ad-hoc entry point: run one full scrape against the 'exchange' provider.
    spider = ChinaETFListSpider(provider='exchange')
    spider.run()
# the __all__ is generated
__all__ = ['ChinaETFListSpider'] | [
"pandas.DataFrame",
"pandas.read_html",
"io.BytesIO",
"zvt.utils.time_utils.now_pd_timestamp",
"zvt.contract.api.df_to_db",
"zvt.api.utils.china_stock_code_to_id",
"demjson3.decode",
"requests.get",
"re.search",
"pandas.concat"
] | [((772, 825), 'requests.get', 'requests.get', (['url'], {'headers': 'DEFAULT_SH_ETF_LIST_HEADER'}), '(url, headers=DEFAULT_SH_ETF_LIST_HEADER)\n', (784, 825), False, 'import requests\n'), ((850, 880), 'demjson3.decode', 'demjson3.decode', (['response.text'], {}), '(response.text)\n', (865, 880), False, 'import demjson3\n'), ((1276, 1293), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1288, 1293), False, 'import requests\n'), ((2204, 2280), 'zvt.contract.api.df_to_db', 'df_to_db', ([], {'df': 'df', 'data_schema': 'Etf', 'provider': 'self.provider', 'force_update': '(False)'}), '(df=df, data_schema=Etf, provider=self.provider, force_update=False)\n', (2212, 2280), False, 'from zvt.contract.api import df_to_db\n'), ((6440, 6454), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6452, 6454), True, 'import pandas as pd\n'), ((1322, 1350), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (1332, 1350), False, 'import io\n'), ((2922, 2975), 'requests.get', 'requests.get', (['url'], {'headers': 'DEFAULT_SH_ETF_LIST_HEADER'}), '(url, headers=DEFAULT_SH_ETF_LIST_HEADER)\n', (2934, 2975), False, 'import requests\n'), ((3004, 3034), 'demjson3.decode', 'demjson3.decode', (['response.text'], {}), '(response.text)\n', (3019, 3034), False, 'import demjson3\n'), ((3656, 3674), 'zvt.utils.time_utils.now_pd_timestamp', 'now_pd_timestamp', ([], {}), '()\n', (3672, 3674), False, 'from zvt.utils.time_utils import now_pd_timestamp\n'), ((3908, 3986), 'zvt.contract.api.df_to_db', 'df_to_db', ([], {'data_schema': 'self.data_schema', 'df': 'response_df', 'provider': 'self.provider'}), '(data_schema=self.data_schema, df=response_df, provider=self.provider)\n', (3916, 3986), False, 'from zvt.contract.api import df_to_db\n'), ((4646, 4663), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (4658, 4663), False, 'import requests\n'), ((5648, 5666), 'zvt.utils.time_utils.now_pd_timestamp', 'now_pd_timestamp', ([], {}), '()\n', (5664, 
5666), False, 'from zvt.utils.time_utils import now_pd_timestamp\n'), ((5900, 5978), 'zvt.contract.api.df_to_db', 'df_to_db', ([], {'data_schema': 'self.data_schema', 'df': 'response_df', 'provider': 'self.provider'}), '(data_schema=self.data_schema, df=response_df, provider=self.provider)\n', (5908, 5978), False, 'from zvt.contract.api import df_to_db\n'), ((6557, 6610), 'requests.get', 'requests.get', (['url'], {'headers': 'DEFAULT_SH_ETF_LIST_HEADER'}), '(url, headers=DEFAULT_SH_ETF_LIST_HEADER)\n', (6569, 6610), False, 'import requests\n'), ((6639, 6669), 'demjson3.decode', 'demjson3.decode', (['response.text'], {}), '(response.text)\n', (6654, 6669), False, 'import demjson3\n'), ((6827, 6860), 'pandas.concat', 'pd.concat', (['[type_df, response_df]'], {}), '([type_df, response_df])\n', (6836, 6860), True, 'import pandas as pd\n'), ((7428, 7455), 're.search', 're.search', (['"""(\\\\d+).*"""', 'text'], {}), "('(\\\\d+).*', text)\n", (7437, 7455), False, 'import re\n'), ((4742, 4779), 'pandas.read_html', 'pd.read_html', (['response.text'], {'header': '(1)'}), '(response.text, header=1)\n', (4754, 4779), True, 'import pandas as pd\n'), ((3759, 3787), 'zvt.api.utils.china_stock_code_to_id', 'china_stock_code_to_id', (['code'], {}), '(code)\n', (3781, 3787), False, 'from zvt.api.utils import china_stock_code_to_id\n'), ((5751, 5779), 'zvt.api.utils.china_stock_code_to_id', 'china_stock_code_to_id', (['code'], {}), '(code)\n', (5773, 5779), False, 'from zvt.api.utils import china_stock_code_to_id\n')] |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Script to plot the observations from rollouts as well as their mean and standard deviation
"""
import os
import os.path as osp
import numpy as np
import torch as to
from tabulate import tabulate
import pyrado
from pyrado.logger.experiment import ask_for_experiment, load_dict_from_yaml
from pyrado.utils.argparser import get_argparser
from pyrado.utils.experiments import load_rollouts_from_dir
if __name__ == "__main__":
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Get the experiment's directory to load from
    ex_dir = ask_for_experiment(hparam_list=args.show_hparams) if args.dir is None else args.dir
    # Load the rollouts
    rollouts, names = load_rollouts_from_dir(ex_dir)
    # load rollouts from the
    # Locate the hyperparameter / settings yaml files in the experiment dir.
    hparam, settings = None, None
    for file_name in os.listdir(ex_dir):
        if file_name.startswith("hparam") and file_name.endswith(".yaml"):
            hparam = load_dict_from_yaml(osp.join(ex_dir, file_name))
        elif file_name == "settings.yaml":
            settings = load_dict_from_yaml(osp.join(ex_dir, file_name))
    if not hparam:
        raise pyrado.PathErr(msg="No hyperparam file could be found.")
    # get the number of real rollouts from the hyperparams dict
    if hparam.get("algo_hparam", None) and hparam.get("algo_hparam").get("num_real_rollouts", None):
        num_real_rollouts = hparam.get("algo_hparam").get("num_real_rollouts", None)
    elif settings and settings.get("algo_hparam", None):
        num_real_rollouts = settings.get("algo_hparam").get("num_real_rollouts", None)
    else:
        raise pyrado.ValueErr(msg="No `num_real_rollouts` argument was found.")
    # get list of iteration numbers and sort them in ascending order
    prefix = "iter_"
    # NOTE(review): this takes only the single character after "iter_", so
    # iteration numbers >= 10 would be truncated — confirm the naming scheme.
    iter_idcs = [int(name[name.find(prefix) + len(prefix)]) for name in names]
    sorted_idcs = np.argsort(iter_idcs)
    # collect the rewards
    rewards = to.stack([r.undiscounted_return() for r in rollouts])
    table = []
    mean_reward = []
    std_reward = []
    # One table row per iteration: rollouts are stored in contiguous groups
    # of `num_real_rollouts` per iteration.
    for i in sorted_idcs:
        mean_reward = to.mean(rewards[i * num_real_rollouts : (i + 1) * num_real_rollouts])
        std_reward = to.std(rewards[i * num_real_rollouts : (i + 1) * num_real_rollouts])
        max_reward = to.max(rewards[i * num_real_rollouts : (i + 1) * num_real_rollouts])
        table.append([iter_idcs[i], num_real_rollouts, mean_reward, std_reward, max_reward])
    headers = ("iteration", "num real rollouts", "mean reward", "std reward", "max reward")
    # Yehaa
    print(tabulate(table, headers))
    # Save the table in a latex file if requested
    if args.save:
        # Save the table for LaTeX
        table_latex_str = tabulate(table, headers, tablefmt="latex")
        with open(osp.join(ex_dir, f"real_rollouts_rewards.tex"), "w") as tab_file:
            print(table_latex_str, file=tab_file)
| [
"torch.mean",
"pyrado.PathErr",
"pyrado.utils.argparser.get_argparser",
"numpy.argsort",
"pyrado.ValueErr",
"torch.std",
"torch.max",
"pyrado.utils.experiments.load_rollouts_from_dir",
"tabulate.tabulate",
"pyrado.logger.experiment.ask_for_experiment",
"os.path.join",
"os.listdir"
] | [((2432, 2462), 'pyrado.utils.experiments.load_rollouts_from_dir', 'load_rollouts_from_dir', (['ex_dir'], {}), '(ex_dir)\n', (2454, 2462), False, 'from pyrado.utils.experiments import load_rollouts_from_dir\n'), ((2548, 2566), 'os.listdir', 'os.listdir', (['ex_dir'], {}), '(ex_dir)\n', (2558, 2566), False, 'import os\n'), ((3592, 3613), 'numpy.argsort', 'np.argsort', (['iter_idcs'], {}), '(iter_idcs)\n', (3602, 3613), True, 'import numpy as np\n'), ((2301, 2350), 'pyrado.logger.experiment.ask_for_experiment', 'ask_for_experiment', ([], {'hparam_list': 'args.show_hparams'}), '(hparam_list=args.show_hparams)\n', (2319, 2350), False, 'from pyrado.logger.experiment import ask_for_experiment, load_dict_from_yaml\n'), ((2862, 2918), 'pyrado.PathErr', 'pyrado.PathErr', ([], {'msg': '"""No hyperparam file could be found."""'}), "(msg='No hyperparam file could be found.')\n", (2876, 2918), False, 'import pyrado\n'), ((3813, 3880), 'torch.mean', 'to.mean', (['rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts]'], {}), '(rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts])\n', (3820, 3880), True, 'import torch as to\n'), ((3904, 3970), 'torch.std', 'to.std', (['rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts]'], {}), '(rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts])\n', (3910, 3970), True, 'import torch as to\n'), ((3994, 4060), 'torch.max', 'to.max', (['rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts]'], {}), '(rewards[i * num_real_rollouts:(i + 1) * num_real_rollouts])\n', (4000, 4060), True, 'import torch as to\n'), ((4272, 4296), 'tabulate.tabulate', 'tabulate', (['table', 'headers'], {}), '(table, headers)\n', (4280, 4296), False, 'from tabulate import tabulate\n'), ((4428, 4470), 'tabulate.tabulate', 'tabulate', (['table', 'headers'], {'tablefmt': '"""latex"""'}), "(table, headers, tablefmt='latex')\n", (4436, 4470), False, 'from tabulate import tabulate\n'), ((2208, 2223), 'pyrado.utils.argparser.get_argparser', 
'get_argparser', ([], {}), '()\n', (2221, 2223), False, 'from pyrado.utils.argparser import get_argparser\n'), ((3338, 3403), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'msg': '"""No `num_real_rollouts` argument was found."""'}), "(msg='No `num_real_rollouts` argument was found.')\n", (3353, 3403), False, 'import pyrado\n'), ((2684, 2711), 'os.path.join', 'osp.join', (['ex_dir', 'file_name'], {}), '(ex_dir, file_name)\n', (2692, 2711), True, 'import os.path as osp\n'), ((4489, 4535), 'os.path.join', 'osp.join', (['ex_dir', 'f"""real_rollouts_rewards.tex"""'], {}), "(ex_dir, f'real_rollouts_rewards.tex')\n", (4497, 4535), True, 'import os.path as osp\n'), ((2799, 2826), 'os.path.join', 'osp.join', (['ex_dir', 'file_name'], {}), '(ex_dir, file_name)\n', (2807, 2826), True, 'import os.path as osp\n')] |
import sys
if __name__ == '__main__':
    # Classic "candy" problem: each child gets at least one candy, and any
    # child rated higher than an adjacent child must get more than that child.
    count = int(sys.stdin.readline())
    scores = [int(sys.stdin.readline()) for _ in range(count)]
    allocation = [1] * count
    # Left-to-right pass: reward strictly rising ratings.
    for idx in range(1, count):
        if scores[idx] > scores[idx - 1]:
            allocation[idx] = allocation[idx - 1] + 1
    # Right-to-left pass: fix violations against the right neighbour without
    # breaking the left-to-right invariant.
    for idx in range(count - 2, -1, -1):
        if scores[idx] > scores[idx + 1] and allocation[idx] <= allocation[idx + 1]:
            allocation[idx] = allocation[idx + 1] + 1
    # Minimum total number of candies.
    print(sum(allocation))
| [
"sys.stdin.readline"
] | [((51, 71), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (69, 71), False, 'import sys\n'), ((91, 111), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (109, 111), False, 'import sys\n')] |
import numpy as np
def one_step_lookahead(environment, state, V, discount_factor):
    """
    Helper that computes the expected value of every action in `state`.
    :param environment: Initialized OpenAI gym environment object.
    :param state: Agent's state to consider (integer).
    :param V: Current value-function estimate; vector of length nS.
    :param discount_factor: MDP discount factor.
    :return: Vector of length nA with the expected value of each action.
    """
    n_actions = environment.nA
    action_values = np.zeros(n_actions)
    for action in range(n_actions):
        # Expectation over the transition distribution P[state][action].
        action_values[action] = sum(
            prob * (reward + discount_factor * V[next_state])
            for prob, next_state, reward, terminated in environment.P[state][action]
        )
    return action_values
def policy_evaluation(policy, environment, discount_factor=1.0, theta=1e-9, max_iter=1e9):
"""
Evaluate a policy given a deterministic environment.
:param policy: Matrix of a size nSxnA, each cell represents a probability of taking action a in state s.
:param environment: Initialized OpenAI gym environment object.
:param discount_factor: MDP discount factor. Float in range from 0 to 1.
:param theta: A threshold of a value function change.
:param max_iter: Maximum number of iteration to prevent infinite loops.
:return: A vector of size nS, which represent a value function for each state.
"""
# Number of evaluation iterations
evaluation_iterations = 1
# Initialize a value function for each state as zero
V = np.zeros(environment.nS)
# Repeat until value change is below the threshold
for i in range(int(max_iter)):
# Initialize a change of value function as zero
delta = 0
# Iterate though each state
for state in range(environment.nS):
# Initial a new value of current state
v = 0
# Try all possible actions which can be taken from this state
for action, action_probability in enumerate(policy[state]):
# Evaluate how good each next state will be
for state_probability, next_state, reward, terminated in environment.P[state][action]:
# Calculate the expected value
v += action_probability * state_probability * (reward + discount_factor * V[next_state])
# Calculate the absolute change of value function
delta = max(delta, np.abs(V[state] - v))
# Update value function
V[state] = v
evaluation_iterations += 1
# Terminate if value change is insignificant
if delta < theta:
print(f'Policy evaluated in {evaluation_iterations} iterations.')
return V
def policy_iteration(environment, discount_factor=1.0, max_iter=1e9):
"""
Policy iteration algorithm to solve MDP.
:param environment: Initialized OpenAI gym environment object.
:param discount_factor: MPD discount factor. Float in range from 0 to 1.
:param max_iter: Maximum number of iterations to prevent infinite loops.
:return: tuple(policy, V), which consist of an optimal policy matrix and value function for each state.
"""
# Start with a random policy
#num states x num actions / num actions
policy = np.ones([environment.nS, environment.nA]) / environment.nA
# Initialize counter of evaluated policies
evaluated_policies = 1
# Repeat until convergence or critical number of iterations reached
for i in range(int(max_iter)):
stable_policy = True
# Evaluate current policy
V = policy_evaluation(policy, environment, discount_factor=discount_factor)
# Go through each state and try to improve actions that were taken
for state in range(environment.nS):
# Choose the best action in a current state under current policy
current_action = np.argmax(policy[state])
# Look one step ahead and evaluate if current action is optimal
# We will try every possible action in a current state
action_value = one_step_lookahead(environment, state, V, discount_factor)
# Select a better action
best_action = np.argmax(action_value)
# If action didn't change
if current_action != best_action:
stable_policy = True
# Greedy policy update
policy[state] = np.eye(environment.nA)[best_action]
evaluated_policies += 1
# If the algorithm converged and policy is not changing anymore, than return final policy and value function
if stable_policy:
print(f'Evaluated {evaluated_policies} policies.')
return policy, V
def value_iteration(environment, discount_factor=1.0, theta=1e-9, max_iterations=1e9):
"""
Value Iteration algorithm to solve MDP.
:param environment: Initialized OpenAI environment object.
:param theta: Stopping threshold. If the value of all states changes less than theta in one iteration - we are done.
:param discount_factor: MDP discount factor.
:param max_iterations: Maximum number of iterations that can be ever performed (to prevent infinite loops).
:return: tuple (policy, V) which contains optimal policy and optimal value function.
"""
# Initialize state-value function with zeros for each environment state
V = np.zeros(environment.nS)
for i in range(int(max_iterations)):
# Early stopping condition
delta = 0
# Update each state
for state in range(environment.nS):
# Do a one-step lookahead to calculate state-action values
action_value = one_step_lookahead(environment, state, V, discount_factor)
# Select best action to perform based on the highest state-action value
best_action_value = np.max(action_value)
# Calculate change in value
delta = max(delta, np.abs(V[state] - best_action_value))
# Update the value function for current state
V[state] = best_action_value
# Check if we can stop
if delta < theta:
print(f'Value-iteration converged at iteration#{i}.')
break
# Create a deterministic policy using the optimal value function
policy = np.zeros([environment.nS, environment.nA])
for state in range(environment.nS):
# One step lookahead to find the best action for this state
action_value = one_step_lookahead(environment, state, V, discount_factor)
# Select best action based on the highest state-action value
best_action = np.argmax(action_value)
# Update the policy to perform a better action at a current state
policy[state, best_action] = 1.0
return policy, V
| [
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.max",
"numpy.eye"
] | [((502, 526), 'numpy.zeros', 'np.zeros', (['environment.nA'], {}), '(environment.nA)\n', (510, 526), True, 'import numpy as np\n'), ((1553, 1577), 'numpy.zeros', 'np.zeros', (['environment.nS'], {}), '(environment.nS)\n', (1561, 1577), True, 'import numpy as np\n'), ((5437, 5461), 'numpy.zeros', 'np.zeros', (['environment.nS'], {}), '(environment.nS)\n', (5445, 5461), True, 'import numpy as np\n'), ((6362, 6404), 'numpy.zeros', 'np.zeros', (['[environment.nS, environment.nA]'], {}), '([environment.nS, environment.nA])\n', (6370, 6404), True, 'import numpy as np\n'), ((3315, 3356), 'numpy.ones', 'np.ones', (['[environment.nS, environment.nA]'], {}), '([environment.nS, environment.nA])\n', (3322, 3356), True, 'import numpy as np\n'), ((6689, 6712), 'numpy.argmax', 'np.argmax', (['action_value'], {}), '(action_value)\n', (6698, 6712), True, 'import numpy as np\n'), ((3933, 3957), 'numpy.argmax', 'np.argmax', (['policy[state]'], {}), '(policy[state])\n', (3942, 3957), True, 'import numpy as np\n'), ((4252, 4275), 'numpy.argmax', 'np.argmax', (['action_value'], {}), '(action_value)\n', (4261, 4275), True, 'import numpy as np\n'), ((5906, 5926), 'numpy.max', 'np.max', (['action_value'], {}), '(action_value)\n', (5912, 5926), True, 'import numpy as np\n'), ((2461, 2481), 'numpy.abs', 'np.abs', (['(V[state] - v)'], {}), '(V[state] - v)\n', (2467, 2481), True, 'import numpy as np\n'), ((4462, 4484), 'numpy.eye', 'np.eye', (['environment.nA'], {}), '(environment.nA)\n', (4468, 4484), True, 'import numpy as np\n'), ((5999, 6035), 'numpy.abs', 'np.abs', (['(V[state] - best_action_value)'], {}), '(V[state] - best_action_value)\n', (6005, 6035), True, 'import numpy as np\n')] |
# BuildTarget: images/interfaceDefaultLightPlug.png
# BuildTarget: images/interfaceLightLinkSetupGraphEditor.png
# BuildTarget: images/interfaceLightSetGraphEditor.png
# BuildTarget: images/interfaceLightSetNodeEditor.png
# BuildTarget: images/interfaceLinkedLightsPlug.png
# BuildTarget: images/taskLightLinkingSetExpressionLocation.png
# BuildTarget: images/taskLightLinkingSetExpressionSet.png
import os
import subprocess32 as subprocess
import tempfile
import time
import imath
import IECore
import Gaffer
import GafferScene
import GafferUI
import GafferSceneUI
import GafferAppleseed
scriptWindow = GafferUI.ScriptWindow.acquire( script )
viewer = scriptWindow.getLayout().editors( GafferUI.Viewer )[0]
graphEditor = scriptWindow.getLayout().editors( GafferUI.GraphEditor )[0]
hierarchyView = scriptWindow.getLayout().editors( GafferSceneUI.HierarchyView )[0]
# Base graph
script["Sphere"] = GafferScene.Sphere()
script["Group"] = GafferScene.Group()
script["Light"] = GafferAppleseed.AppleseedLight()
script["Group"]["in"]["in0"].setInput( script["Sphere"]["out"] )
script["Group"]["in"]["in1"].setInput( script["Light"]["out"] )
script["PathFilter"] = GafferScene.PathFilter()
script["PathFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
script["StandardAttributes"] = GafferScene.StandardAttributes()
script["StandardAttributes"]["in"].setInput( script["Group"]["out"] )
script["StandardAttributes"]["filter"].setInput( script["PathFilter"]["out"] )
script["StandardAttributes"]["attributes"]["linkedLights"]["enabled"].setValue( True )
script["StandardAttributes"]["attributes"]["linkedLights"]["value"].setValue( "/group/light" )
script.addChild( script["Sphere"] )
script.addChild( script["Light"] )
script.addChild( script["Group"] )
script.addChild( script["StandardAttributes"] )
script.addChild( script["PathFilter"] )
# Interface: the Default Light plug of a light node in the Node Editor
# TODO: "AppleseedLight" label clearly visible; figure out a way to fake "ArnoldLight" label
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Light"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.PlugValueWidget.acquire( script["Light"]["defaultLight"] )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/interfaceDefaultLightPlug.png" )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Interface: the linkedLights attribute in the Scene Inspector
script.selection().clear()
script.selection().add( script["StandardAttributes"] )
__path = "/group/sphere"
__paths = IECore.PathMatcher( [ __path ] )
GafferSceneUI.ContextAlgo.setSelectedPaths( script.context(), __paths )
from GafferSceneUI.SceneInspector import __AttributesSection
for imageName, sectionClass in [
( "LinkedLightsAttribute.png", __AttributesSection )
] :
section = sectionClass()
section._Section__collapsible.setCollapsed( False )
with GafferUI.Window( "Property" ) as window :
sceneInspector = GafferSceneUI.SceneInspector( script, sections = [ section ] )
sceneInspector.setNodeSet( Gaffer.StandardSet( [ script["StandardAttributes"] ] ) )
sceneInspector.setTargetPaths( [ __path ] )
window.resizeToFitChild()
window.setVisible( True )
GafferUI.WidgetAlgo.grab( widget = sceneInspector, imagePath = "images/interface" + imageName )
# Interface: a StandardAttributes node downstream of an object node
script.selection().clear()
graphEditor.frame( script.children( Gaffer.Node ) )
GafferUI.WidgetAlgo.grab( widget = graphEditor, imagePath = "images/interfaceLightLinkSetupGraphEditor.png" )
# Interface: the empty Linked Lights plug of a StandardAttributes node in the Node Editor
script["StandardAttributes"]["attributes"]["linkedLights"]["value"].setValue( "" )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["StandardAttributes"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.PlugValueWidget.acquire( script["StandardAttributes"]["attributes"]["linkedLights"] )
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/interfaceLinkedLightsPlug.png" )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Task: the light linking set expression with a location
script["StandardAttributes"]["attributes"]["linkedLights"]["value"].setValue( "/group/light" )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["StandardAttributes"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/taskLightLinkingSetExpressionLocation.png" )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Task: a Set node in the Node Editor
script["Set"] = GafferScene.Set()
script["Set"]["in"].setInput( script["Light"]["out"] )
script["Set"]["name"].setValue( "myLights" )
script["Set"]["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )
script["Group"]["in"][1].setInput( script["Set"]["out"] )
script.addChild( script["Set"] )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["Set"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/interfaceLightSetNodeEditor.png" )
nodeEditorWindow.parent().close()
del nodeEditorWindow
# Task: a Set node downstream of a light node in the Graph Editor
graphGadget = GafferUI.GraphGadget( script )
graphGadget.getLayout().layoutNodes( graphGadget )
graphEditor.frame( Gaffer.StandardSet( [ script["Set"] ] ) )
GafferUI.WidgetAlgo.grab( widget = graphEditor, imagePath = "images/interfaceLightSetGraphEditor.png" )
# Task: the light linking set expression with a set
script["StandardAttributes"]["attributes"]["linkedLights"]["value"].setValue( "myLights" )
nodeEditorWindow = GafferUI.NodeEditor.acquire( script["StandardAttributes"], floating = True )
nodeEditorWindow._qtWidget().setFocus()
GafferUI.WidgetAlgo.grab( widget = nodeEditorWindow, imagePath = "images/taskLightLinkingSetExpressionSet.png" )
nodeEditorWindow.parent().close()
del nodeEditorWindow
| [
"GafferScene.Sphere",
"GafferUI.PlugValueWidget.acquire",
"GafferUI.WidgetAlgo.grab",
"GafferSceneUI.SceneInspector",
"GafferUI.ScriptWindow.acquire",
"GafferAppleseed.AppleseedLight",
"GafferScene.StandardAttributes",
"IECore.PathMatcher",
"GafferScene.Group",
"GafferUI.NodeEditor.acquire",
"Ga... | [((607, 644), 'GafferUI.ScriptWindow.acquire', 'GafferUI.ScriptWindow.acquire', (['script'], {}), '(script)\n', (636, 644), False, 'import GafferUI\n'), ((901, 921), 'GafferScene.Sphere', 'GafferScene.Sphere', ([], {}), '()\n', (919, 921), False, 'import GafferScene\n'), ((940, 959), 'GafferScene.Group', 'GafferScene.Group', ([], {}), '()\n', (957, 959), False, 'import GafferScene\n'), ((978, 1010), 'GafferAppleseed.AppleseedLight', 'GafferAppleseed.AppleseedLight', ([], {}), '()\n', (1008, 1010), False, 'import GafferAppleseed\n'), ((1163, 1187), 'GafferScene.PathFilter', 'GafferScene.PathFilter', ([], {}), '()\n', (1185, 1187), False, 'import GafferScene\n'), ((1308, 1340), 'GafferScene.StandardAttributes', 'GafferScene.StandardAttributes', ([], {}), '()\n', (1338, 1340), False, 'import GafferScene\n'), ((2050, 2109), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (["script['Light']"], {'floating': '(True)'}), "(script['Light'], floating=True)\n", (2077, 2109), False, 'import GafferUI\n'), ((2154, 2219), 'GafferUI.PlugValueWidget.acquire', 'GafferUI.PlugValueWidget.acquire', (["script['Light']['defaultLight']"], {}), "(script['Light']['defaultLight'])\n", (2186, 2219), False, 'import GafferUI\n'), ((2222, 2326), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'nodeEditorWindow', 'imagePath': '"""images/interfaceDefaultLightPlug.png"""'}), "(widget=nodeEditorWindow, imagePath=\n 'images/interfaceDefaultLightPlug.png')\n", (2246, 2326), False, 'import GafferUI\n'), ((2564, 2592), 'IECore.PathMatcher', 'IECore.PathMatcher', (['[__path]'], {}), '([__path])\n', (2582, 2592), False, 'import IECore\n'), ((3468, 3576), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'graphEditor', 'imagePath': '"""images/interfaceLightLinkSetupGraphEditor.png"""'}), "(widget=graphEditor, imagePath=\n 'images/interfaceLightLinkSetupGraphEditor.png')\n", (3492, 3576), False, 'import GafferUI\n'), ((3771, 3843), 
'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (["script['StandardAttributes']"], {'floating': '(True)'}), "(script['StandardAttributes'], floating=True)\n", (3798, 3843), False, 'import GafferUI\n'), ((3888, 3985), 'GafferUI.PlugValueWidget.acquire', 'GafferUI.PlugValueWidget.acquire', (["script['StandardAttributes']['attributes']['linkedLights']"], {}), "(script['StandardAttributes']['attributes']\n ['linkedLights'])\n", (3920, 3985), False, 'import GafferUI\n'), ((3983, 4087), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'nodeEditorWindow', 'imagePath': '"""images/interfaceLinkedLightsPlug.png"""'}), "(widget=nodeEditorWindow, imagePath=\n 'images/interfaceLinkedLightsPlug.png')\n", (4007, 4087), False, 'import GafferUI\n'), ((4316, 4388), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (["script['StandardAttributes']"], {'floating': '(True)'}), "(script['StandardAttributes'], floating=True)\n", (4343, 4388), False, 'import GafferUI\n'), ((4433, 4549), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'nodeEditorWindow', 'imagePath': '"""images/taskLightLinkingSetExpressionLocation.png"""'}), "(widget=nodeEditorWindow, imagePath=\n 'images/taskLightLinkingSetExpressionLocation.png')\n", (4457, 4549), False, 'import GafferUI\n'), ((4661, 4678), 'GafferScene.Set', 'GafferScene.Set', ([], {}), '()\n', (4676, 4678), False, 'import GafferScene\n'), ((4964, 5021), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (["script['Set']"], {'floating': '(True)'}), "(script['Set'], floating=True)\n", (4991, 5021), False, 'import GafferUI\n'), ((5066, 5172), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'nodeEditorWindow', 'imagePath': '"""images/interfaceLightSetNodeEditor.png"""'}), "(widget=nodeEditorWindow, imagePath=\n 'images/interfaceLightSetNodeEditor.png')\n", (5090, 5172), False, 'import GafferUI\n'), ((5310, 5338), 'GafferUI.GraphGadget', 
'GafferUI.GraphGadget', (['script'], {}), '(script)\n', (5330, 5338), False, 'import GafferUI\n'), ((5453, 5555), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'graphEditor', 'imagePath': '"""images/interfaceLightSetGraphEditor.png"""'}), "(widget=graphEditor, imagePath=\n 'images/interfaceLightSetGraphEditor.png')\n", (5477, 5555), False, 'import GafferUI\n'), ((5720, 5792), 'GafferUI.NodeEditor.acquire', 'GafferUI.NodeEditor.acquire', (["script['StandardAttributes']"], {'floating': '(True)'}), "(script['StandardAttributes'], floating=True)\n", (5747, 5792), False, 'import GafferUI\n'), ((5837, 5948), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'nodeEditorWindow', 'imagePath': '"""images/taskLightLinkingSetExpressionSet.png"""'}), "(widget=nodeEditorWindow, imagePath=\n 'images/taskLightLinkingSetExpressionSet.png')\n", (5861, 5948), False, 'import GafferUI\n'), ((1228, 1270), 'IECore.StringVectorData', 'IECore.StringVectorData', (["['/group/sphere']"], {}), "(['/group/sphere'])\n", (1251, 1270), False, 'import IECore\n'), ((3224, 3318), 'GafferUI.WidgetAlgo.grab', 'GafferUI.WidgetAlgo.grab', ([], {'widget': 'sceneInspector', 'imagePath': "('images/interface' + imageName)"}), "(widget=sceneInspector, imagePath=\n 'images/interface' + imageName)\n", (3248, 3318), False, 'import GafferUI\n'), ((4812, 4847), 'IECore.StringVectorData', 'IECore.StringVectorData', (["['/light']"], {}), "(['/light'])\n", (4835, 4847), False, 'import IECore\n'), ((5411, 5446), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (["[script['Set']]"], {}), "([script['Set']])\n", (5429, 5446), False, 'import Gaffer\n'), ((2910, 2937), 'GafferUI.Window', 'GafferUI.Window', (['"""Property"""'], {}), "('Property')\n", (2925, 2937), False, 'import GafferUI\n'), ((2972, 3028), 'GafferSceneUI.SceneInspector', 'GafferSceneUI.SceneInspector', (['script'], {'sections': '[section]'}), '(script, sections=[section])\n', (3000, 3028), False, 'import 
GafferSceneUI\n'), ((3064, 3114), 'Gaffer.StandardSet', 'Gaffer.StandardSet', (["[script['StandardAttributes']]"], {}), "([script['StandardAttributes']])\n", (3082, 3114), False, 'import Gaffer\n')] |
import numpy as np
def hit_rate(array1, array2):
"""
calculate the hit rate based upon 2 boolean maps. (i.e. where are both 1)
"""
# count the number of cells that are flooded in both array1 and 2
idx_both = np.sum(np.logical_and(array1, array2))
idx_1 = np.sum(array1)
return float(idx_both)/float(idx_1)
def false_alarm_rate(array1, array2):
"""
calculate the false alarm rate based upon 2 boolean maps. (i.e. amount of cells where array2 is True but array1 False)
"""
# count the number of cells that are flooded in both array1 and 2
idx_2_only = np.sum(np.logical_and(array2, array1!=1))
idx_2_total = np.sum(array2)
return float(idx_2_only)/float(idx_2_total)
def critical_success(array1, array2):
"""
calculate the critical success rate based upon 2 boolean maps.
"""
idx_both = np.sum(np.logical_and(array1, array2))
idx_either = np.sum(np.logical_or(array1, array2))
return float(idx_both)/float(idx_either)
def contingency_map(array1, array2, threshold1=0., threshold2=0.):
"""
Establish the contingency between array1 and array2.
Returns an array where
1 means only array2 gives a value > threshold1,
2 means only array1 gives a values > threshold2,
3 means array1 gives a value > threshold1, and array2 a value > threshold2
0 means both arrays do not give a value > threshold1, 2 respectively
function returns the threshold exceedance (0-1) of array 1 and 2, as well as the contingency map
"""
array1_thres = array1 > threshold1
array2_thres = array2 > threshold2
contingency = np.zeros(array1.shape)
contingency += np.int16(array2_thres)
contingency += np.int16(array1_thres)*2
return array1_thres, array2_thres, contingency
def calc_contingency(bench_d, model_d, bench_thres, model_thres):
"""
determines hit rate, false alarm ratio, critical success index, and contingency map for a given combination of simulated and observed flood extent.
"""
x_bench = bench_d.width
y_bench = bench_d.height
bench_data = bench_d.read(1)
fill_bench = bench_d.nodata
extent_bench = bench_d.bounds
x_model = model_d.width
y_model = model_d.height
model_data = model_d.read(1)
fill_model = model_d.nodata
bench_data[bench_data==fill_bench] = 0.
model_data[model_data==fill_model] = 0.
flood1, flood2, cont_arr = contingency_map(bench_data, model_data, threshold1=bench_thres, threshold2=model_thres)
hr = hit_rate(flood1, flood2)
far = false_alarm_rate(flood1, flood2)
csi = critical_success(flood1, flood2)
return hr, far, csi, cont_arr | [
"numpy.sum",
"numpy.logical_and",
"numpy.zeros",
"numpy.logical_or",
"numpy.int16"
] | [((280, 294), 'numpy.sum', 'np.sum', (['array1'], {}), '(array1)\n', (286, 294), True, 'import numpy as np\n'), ((665, 679), 'numpy.sum', 'np.sum', (['array2'], {}), '(array2)\n', (671, 679), True, 'import numpy as np\n'), ((1640, 1662), 'numpy.zeros', 'np.zeros', (['array1.shape'], {}), '(array1.shape)\n', (1648, 1662), True, 'import numpy as np\n'), ((1682, 1704), 'numpy.int16', 'np.int16', (['array2_thres'], {}), '(array2_thres)\n', (1690, 1704), True, 'import numpy as np\n'), ((236, 266), 'numpy.logical_and', 'np.logical_and', (['array1', 'array2'], {}), '(array1, array2)\n', (250, 266), True, 'import numpy as np\n'), ((612, 647), 'numpy.logical_and', 'np.logical_and', (['array2', '(array1 != 1)'], {}), '(array2, array1 != 1)\n', (626, 647), True, 'import numpy as np\n'), ((878, 908), 'numpy.logical_and', 'np.logical_and', (['array1', 'array2'], {}), '(array1, array2)\n', (892, 908), True, 'import numpy as np\n'), ((934, 963), 'numpy.logical_or', 'np.logical_or', (['array1', 'array2'], {}), '(array1, array2)\n', (947, 963), True, 'import numpy as np\n'), ((1724, 1746), 'numpy.int16', 'np.int16', (['array1_thres'], {}), '(array1_thres)\n', (1732, 1746), True, 'import numpy as np\n')] |
import apsis.actions
import apsis.lib.json
from apsis.lib.py import tupleize
from apsis.lib import email
#-------------------------------------------------------------------------------
# FIXME: jinja2?
TEMPLATE = """<!doctype html>
<html>
<head>
<title>{subject}</title>
</head>
<body>
<p>
program: <code>{program}</code>
</p>
<pre>{output}</pre>
</body>
</html>
"""
class EmailAction:
"""
Action that sends an HTML email summarizing the run.
"""
def __init__(self, to=(), *, from_=None, condition=None):
self.__to = tupleize(to)
self.__from = from_
self.__condition = condition
@classmethod
def from_jso(cls, jso):
with apsis.lib.json.check_schema(jso) as pop:
to = pop("to", str)
from_ = pop("from", str)
cnd = pop("if", apsis.actions.Condition.from_jso, default=None)
return cls(to, from_=from_, condition=cnd)
def to_jso(self):
cnd = None if self.__condition is None else self.__condition.to_jso()
return {
"to" : list(self.__to),
"from" : self.__from,
"if" : cnd,
}
async def __call__(self, apsis, run):
if self.__condition is not None and not self.__condition(run):
return
subject = f"Apsis {run.run_id}: {run.inst}: {run.state.name}"
program = str(run.program)
output_meta = apsis.outputs.get_metadata(run.run_id)
if "output" in output_meta:
output = apsis.outputs.get_data(run.run_id, "output").decode()
else:
output = ""
body = TEMPLATE.format(**locals())
smtp_cfg = apsis.cfg.get("smtp", {})
email.send_html(
self.__to, subject, body, from_=self.__from, smtp_cfg=smtp_cfg)
| [
"apsis.lib.py.tupleize",
"apsis.lib.email.send_html"
] | [((554, 566), 'apsis.lib.py.tupleize', 'tupleize', (['to'], {}), '(to)\n', (562, 566), False, 'from apsis.lib.py import tupleize\n'), ((1730, 1809), 'apsis.lib.email.send_html', 'email.send_html', (['self.__to', 'subject', 'body'], {'from_': 'self.__from', 'smtp_cfg': 'smtp_cfg'}), '(self.__to, subject, body, from_=self.__from, smtp_cfg=smtp_cfg)\n', (1745, 1809), False, 'from apsis.lib import email\n')] |
# -*- coding:utf-8 -*-
"""
Asynchronous driven quantitative trading framework.
Author: HuangTao
Date: 2017/04/26
Email: <EMAIL>
"""
import signal
import asyncio
from quant.utils import logger
from quant.config import config
class Quant:
""" Asynchronous driven quantitative trading framework.
"""
def __init__(self):
self.loop = None
self.event_center = None
def initialize(self, config_module=None):
""" Initialize.
Args:
config_module: config file path, normally it"s a json file.
"""
self._get_event_loop()
self._load_settings(config_module)
self._init_logger()
self._init_event_center()
self._do_heartbeat()
def start(self):
"""Start the event loop."""
def keyboard_interrupt(s, f):
print("KeyboardInterrupt (ID: {}) has been caught. Cleaning up...".format(s))
self.loop.stop()
signal.signal(signal.SIGINT, keyboard_interrupt)
logger.info("start io loop ...", caller=self)
self.loop.run_forever()
def stop(self):
"""Stop the event loop."""
logger.info("stop io loop.", caller=self)
self.loop.stop()
def _get_event_loop(self):
""" Get a main io loop. """
if not self.loop:
self.loop = asyncio.get_event_loop()
return self.loop
def _load_settings(self, config_module):
""" Load config settings.
Args:
config_module: config file path, normally it"s a json file.
"""
config.loads(config_module)
def _init_logger(self):
"""Initialize logger."""
console = config.log.get("console", True)
level = config.log.get("level", "DEBUG")
path = config.log.get("path", "/tmp/logs/Quant")
name = config.log.get("name", "quant.log")
clear = config.log.get("clear", False)
backup_count = config.log.get("backup_count", 0)
if console:
logger.initLogger(level)
else:
logger.initLogger(level, path, name, clear, backup_count)
def _init_event_center(self):
"""Initialize event center."""
if config.rabbitmq:
from quant.event import EventCenter
self.event_center = EventCenter()
self.loop.run_until_complete(self.event_center.connect())
def _do_heartbeat(self):
"""Start server heartbeat."""
from quant.heartbeat import heartbeat
self.loop.call_later(0.5, heartbeat.ticker)
quant = Quant()
| [
"asyncio.get_event_loop",
"quant.event.EventCenter",
"quant.utils.logger.initLogger",
"quant.utils.logger.info",
"quant.config.config.loads",
"quant.config.config.log.get",
"signal.signal"
] | [((955, 1003), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'keyboard_interrupt'], {}), '(signal.SIGINT, keyboard_interrupt)\n', (968, 1003), False, 'import signal\n'), ((1013, 1058), 'quant.utils.logger.info', 'logger.info', (['"""start io loop ..."""'], {'caller': 'self'}), "('start io loop ...', caller=self)\n", (1024, 1058), False, 'from quant.utils import logger\n'), ((1155, 1196), 'quant.utils.logger.info', 'logger.info', (['"""stop io loop."""'], {'caller': 'self'}), "('stop io loop.', caller=self)\n", (1166, 1196), False, 'from quant.utils import logger\n'), ((1577, 1604), 'quant.config.config.loads', 'config.loads', (['config_module'], {}), '(config_module)\n', (1589, 1604), False, 'from quant.config import config\n'), ((1685, 1716), 'quant.config.config.log.get', 'config.log.get', (['"""console"""', '(True)'], {}), "('console', True)\n", (1699, 1716), False, 'from quant.config import config\n'), ((1733, 1765), 'quant.config.config.log.get', 'config.log.get', (['"""level"""', '"""DEBUG"""'], {}), "('level', 'DEBUG')\n", (1747, 1765), False, 'from quant.config import config\n'), ((1781, 1822), 'quant.config.config.log.get', 'config.log.get', (['"""path"""', '"""/tmp/logs/Quant"""'], {}), "('path', '/tmp/logs/Quant')\n", (1795, 1822), False, 'from quant.config import config\n'), ((1838, 1873), 'quant.config.config.log.get', 'config.log.get', (['"""name"""', '"""quant.log"""'], {}), "('name', 'quant.log')\n", (1852, 1873), False, 'from quant.config import config\n'), ((1890, 1920), 'quant.config.config.log.get', 'config.log.get', (['"""clear"""', '(False)'], {}), "('clear', False)\n", (1904, 1920), False, 'from quant.config import config\n'), ((1944, 1977), 'quant.config.config.log.get', 'config.log.get', (['"""backup_count"""', '(0)'], {}), "('backup_count', 0)\n", (1958, 1977), False, 'from quant.config import config\n'), ((1340, 1364), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1362, 1364), False, 'import 
asyncio\n'), ((2010, 2034), 'quant.utils.logger.initLogger', 'logger.initLogger', (['level'], {}), '(level)\n', (2027, 2034), False, 'from quant.utils import logger\n'), ((2061, 2118), 'quant.utils.logger.initLogger', 'logger.initLogger', (['level', 'path', 'name', 'clear', 'backup_count'], {}), '(level, path, name, clear, backup_count)\n', (2078, 2118), False, 'from quant.utils import logger\n'), ((2301, 2314), 'quant.event.EventCenter', 'EventCenter', ([], {}), '()\n', (2312, 2314), False, 'from quant.event import EventCenter\n')] |
"""
extract configuration
"""
import re
_prompt = "ConfigExtractor > "
def _printConfig(str):
print('%s%s' % (_prompt,str))
def _Service(str):
tmpSvcNew = None
tmpSvcOld = None
# get new service
try:
svcMgr = theApp.serviceMgr()
tmpSvcNew = getattr(svcMgr,str)
except Exception:
pass
# get old service
try:
tmpSvcOld = Service(str)
except Exception:
pass
# return old one for 12.0.6
if tmpSvcOld is not None:
return tmpSvcOld
return tmpSvcNew
def _Algorithm(str):
try:
return Algorithm(str)
except Exception:
return None
######################
# input
EventSelector = _Service( "EventSelector" )
if hasattr(EventSelector,'InputCollections') and hasattr(EventSelector.InputCollections,'__len__') \
and len(EventSelector.InputCollections):
# POOL
if hasattr(EventSelector,"CollectionType") and hasattr(EventSelector.CollectionType,'__len__') \
and len(EventSelector.CollectionType) and EventSelector.CollectionType == "ExplicitROOT":
# tag collection
_printConfig('Input=COLL')
# reference
try:
if EventSelector.RefName is not None:
_printConfig('Input=COLLREF %s' % EventSelector.RefName)
except Exception:
pass
# query
try:
if EventSelector.Query is not None:
_printConfig('Input=COLLQUERY %s' % EventSelector.Query)
except Exception:
pass
else:
# normal POOL
_printConfig('Input=POOL')
# file list
str = 'InputFiles '
for file in EventSelector.InputCollections:
str += '%s ' % file.split('/')[-1]
_printConfig(str)
else:
# ByteStream
noInputFlag = True
# both _Service and Service need to be checked due to Configurable
compList = []
try:
compList.append(_Service( "ByteStreamInputSvc" ))
except Exception:
pass
try:
compList.append(Service( "ByteStreamInputSvc" ))
except Exception:
pass
for ByteStreamInputSvc in compList:
if (hasattr(ByteStreamInputSvc,'FullFileName') and hasattr(ByteStreamInputSvc.FullFileName,'__len__')
and len(ByteStreamInputSvc.FullFileName)) or \
(hasattr(ByteStreamInputSvc,'FilePrefix') and hasattr(ByteStreamInputSvc.FilePrefix,'__len__')
and len(ByteStreamInputSvc.FilePrefix)):
_printConfig('Input=BS')
noInputFlag = False
break
if noInputFlag:
_printConfig('No Input')
# back navigation
if hasattr(EventSelector,'BackNavigation') and EventSelector.BackNavigation == True:
_printConfig('BackNavigation=ON')
# minimum bias
minBiasEventSelector = _Service( "minBiasEventSelector" )
if hasattr(minBiasEventSelector,'InputCollections') and hasattr(minBiasEventSelector.InputCollections,'__len__') \
and len(minBiasEventSelector.InputCollections):
_printConfig('Input=MINBIAS')
# cavern
cavernEventSelector = _Service( "cavernEventSelector" )
if hasattr(cavernEventSelector,'InputCollections') and hasattr(cavernEventSelector.InputCollections,'__len__') \
and len(cavernEventSelector.InputCollections):
_printConfig('Input=CAVERN')
# beam gas
BeamGasEventSelector = _Service( "BeamGasEventSelector" )
if hasattr(BeamGasEventSelector,'InputCollections') and hasattr(BeamGasEventSelector.InputCollections,'__len__') \
and len(BeamGasEventSelector.InputCollections):
_printConfig('Input=BEAMGAS')
# beam halo
BeamHaloEventSelector = _Service( "BeamHaloEventSelector" )
if hasattr(BeamHaloEventSelector,'InputCollections') and hasattr(BeamHaloEventSelector.InputCollections,'__len__') \
and len(BeamHaloEventSelector.InputCollections):
_printConfig('Input=BEAMHALO')
# condition files
CondProxyProvider = _Service( "CondProxyProvider" )
if hasattr(CondProxyProvider,'InputCollections') and hasattr(CondProxyProvider.InputCollections,'__len__') \
and len(CondProxyProvider.InputCollections):
condStr = ''
for fName in CondProxyProvider.InputCollections:
if not fName.startswith('LFN:'):
condStr += "%s," % fName
if condStr != '':
retStr = "CondInput %s" % condStr
retStr = retStr[:-1]
_printConfig(retStr)
######################
# configurable
_configs = []
seqList = []
try:
# get all Configurable names
from AthenaCommon.AlgSequence import AlgSequence
tmpKeys = AlgSequence().allConfigurables.keys()
# get AlgSequences
seqList = [AlgSequence()]
try:
for key in tmpKeys:
# check if it is available via AlgSequence
if not hasattr(AlgSequence(),key.split('/')[-1]):
continue
# get full name
tmpConf = getattr(AlgSequence(),key.split('/')[-1])
if hasattr(tmpConf,'getFullName'):
tmpFullName = tmpConf.getFullName()
# append AthSequencer
if tmpFullName.startswith('AthSequencer/'):
seqList.append(tmpConf)
except Exception:
pass
# loop over all sequences
for tmpAlgSequence in seqList:
# loop over keys
for key in tmpKeys:
# check if it is available via AlgSequence
if not hasattr(tmpAlgSequence,key.split('/')[-1]):
continue
# get fullname
if key.find('/') == -1:
if hasattr(tmpAlgSequence,key):
tmpAlg = getattr(tmpAlgSequence,key)
if hasattr(tmpAlg,'getFullName'):
_configs.append(getattr(tmpAlgSequence,key).getFullName())
elif hasattr(tmpAlg,'getName') and hasattr(tmpAlg,'getType'):
# ServiceHandle
_configs.append('%s/%s' % (tmpAlg.getType(),tmpAlg.getName()))
else:
# use short name if it doesn't have getFullName
_configs.append(key)
else:
_configs.append(key)
except Exception:
pass
def _getConfig(key):
if seqList == []:
from AthenaCommon.AlgSequence import AlgSequence
return getattr(AlgSequence(),key.split('/')[-1])
else:
for tmpAlgSequence in seqList:
if hasattr(tmpAlgSequence,key.split('/')[-1]):
return getattr(tmpAlgSequence,key.split('/')[-1])
######################
# output
# hist
HistogramPersistencySvc=_Service("HistogramPersistencySvc")
if hasattr(HistogramPersistencySvc,'OutputFile') and hasattr(HistogramPersistencySvc.OutputFile,'__len__') \
and len(HistogramPersistencySvc.OutputFile):
_printConfig('Output=HIST')
_printConfig(' Name: %s' % HistogramPersistencySvc.OutputFile)
# ntuple
NTupleSvc = _Service( "NTupleSvc" )
if hasattr(NTupleSvc,'Output') and hasattr(NTupleSvc.Output,'__len__') and len(NTupleSvc.Output):
# look for streamname
for item in NTupleSvc.Output:
match = re.search("(\S+)\s+DATAFILE",item)
if match is not None:
sName = item.split()[0]
_printConfig('Output=NTUPLE %s' % sName)
# extract name
fmatch = re.search("DATAFILE=(\S+)\s",item)
if fmatch is not None:
fName = fmatch.group(1)
fName = re.sub('[\"\']','',fName)
fName = fName.split('/')[-1]
_printConfig(' Name: %s'% fName)
streamOutputFiles = {}
ignoreMetaFiles = []
# RDO
foundStreamRD0 = False
key = "AthenaOutputStream/StreamRDO"
if key in _configs:
StreamRDO = _getConfig( key )
else:
StreamRDO = _Algorithm( key.split('/')[-1] )
if hasattr(StreamRDO,'OutputFile') and hasattr(StreamRDO.OutputFile,'__len__') and len(StreamRDO.OutputFile):
streamOutputFiles[key.split('/')[-1]] = StreamRDO.OutputFile
_printConfig('Output=RDO %s' % StreamRDO.OutputFile)
_printConfig(' Name: %s'% StreamRDO.OutputFile)
foundStreamRD0 = True
ignoreMetaFiles.append(StreamRDO.OutputFile)
# ESD
foundStreamESD = False
key = "AthenaOutputStream/StreamESD"
if key in _configs:
StreamESD = _getConfig( key )
else:
StreamESD = _Algorithm( key.split('/')[-1] )
if hasattr(StreamESD,'OutputFile') and hasattr(StreamESD.OutputFile,'__len__') and len(StreamESD.OutputFile):
streamOutputFiles[key.split('/')[-1]] = StreamESD.OutputFile
_printConfig('Output=ESD %s' % StreamESD.OutputFile)
_printConfig(' Name: %s'% StreamESD.OutputFile)
foundStreamESD = True
ignoreMetaFiles.append(StreamESD.OutputFile)
# AOD
foundStreamAOD = False
key = "AthenaOutputStream/StreamAOD"
if key in _configs:
StreamAOD = _getConfig( key )
else:
StreamAOD = _Algorithm( key.split('/')[-1] )
if hasattr(StreamAOD,'OutputFile') and hasattr(StreamAOD.OutputFile,'__len__') and len(StreamAOD.OutputFile):
streamOutputFiles[key.split('/')[-1]] = StreamAOD.OutputFile
_printConfig('Output=AOD %s' % StreamAOD.OutputFile)
_printConfig(' Name: %s'% StreamAOD.OutputFile)
foundStreamAOD = True
ignoreMetaFiles.append(StreamAOD.OutputFile)
# TAG
keys = ["AthenaOutputStream/StreamTAG","RegistrationStream/StreamTAG"]
foundKey = False
for key in keys:
if key in _configs:
StreamTAG = _getConfig( key )
foundKey = True
break
if not foundKey:
StreamTAG = _Algorithm( key.split('/')[-1] )
if hasattr(StreamTAG,'OutputCollection') and hasattr(StreamTAG.OutputCollection,'__len__') and \
len(StreamTAG.OutputCollection):
_printConfig('Output=TAG')
_printConfig(' Name: %s'% StreamTAG.OutputCollection)
# TAGCOM
keys = ["AthenaOutputStream/StreamTAGCOM","RegistrationStream/StreamTAGCOM"]
foundKey = False
for key in keys:
if key in _configs:
StreamTAGX = _getConfig( key )
foundKey = True
break
if not foundKey:
StreamTAGX = _Algorithm( key.split('/')[-1] )
if hasattr(StreamTAGX,'OutputCollection') and hasattr(StreamTAGX.OutputCollection,'__len__') and \
len(StreamTAGX.OutputCollection):
_printConfig('Output=TAGX %s %s' % (StreamTAGX.name(),StreamTAGX.OutputCollection))
_printConfig(' Name: %s'% StreamTAGX.OutputCollection)
# AANT
aantStream = []
appStList = []
for alg in theApp.TopAlg+_configs:
if alg.startswith("AANTupleStream" ):
aName = alg.split('/')[-1]
if alg in _configs:
AANTupleStream = _getConfig(alg)
else:
AANTupleStream = Algorithm(aName)
if hasattr(AANTupleStream.OutputName,'__len__') and len(AANTupleStream.OutputName):
fName = AANTupleStream.OutputName
# look for streamname
THistSvc = _Service( "THistSvc" )
if hasattr(THistSvc.Output,'__len__') and len(THistSvc.Output):
for item in THistSvc.Output:
if re.search(fName,item):
sName = item.split()[0]
# check stream name
if hasattr(AANTupleStream,'StreamName'):
if AANTupleStream.StreamName != sName:
continue
aantStream.append(sName)
tmpAantKey = (aName,sName,fName)
if tmpAantKey not in appStList:
_printConfig('Output=AANT %s %s %s' % (aName,sName,fName))
_printConfig(' Name: %s'% fName)
appStList.append(tmpAantKey)
break
# Stream1
key = "AthenaOutputStream/Stream1"
if key in _configs:
Stream1 = _getConfig( key )
elif hasattr(theApp._streams,key.split('/')[-1]):
Stream1 = getattr(theApp._streams,key.split('/')[-1])
else:
Stream1 = _Algorithm( key.split('/')[-1] )
if hasattr(Stream1,'OutputFile') and hasattr(Stream1.OutputFile,'__len__') and len(Stream1.OutputFile):
if (hasattr(Stream1,'Enable') and Stream1.Enable) or (not hasattr(Stream1,'Enable')):
streamOutputFiles[key.split('/')[-1]] = Stream1.OutputFile
_printConfig('Output=STREAM1 %s' % Stream1.OutputFile)
_printConfig(' Name: %s'% Stream1.OutputFile)
ignoreMetaFiles.append(Stream1.OutputFile)
# Stream2
key = "AthenaOutputStream/Stream2"
if key in _configs:
Stream2 = _getConfig( key )
elif hasattr(theApp._streams,key.split('/')[-1]):
Stream2 = getattr(theApp._streams,key.split('/')[-1])
else:
Stream2 = _Algorithm( key.split('/')[-1] )
if hasattr(Stream2,'OutputFile') and hasattr(Stream2.OutputFile,'__len__') and len(Stream2.OutputFile):
if (hasattr(Stream2,'Enable') and Stream2.Enable) or (not hasattr(Stream2,'Enable')):
streamOutputFiles[key.split('/')[-1]] = Stream2.OutputFile
_printConfig('Output=STREAM2 %s' % Stream2.OutputFile)
_printConfig(' Name: %s'% Stream2.OutputFile)
ignoreMetaFiles.append(Stream2.OutputFile)
# General Stream
strGenFName = ''
strGenStream = ''
strMetaStream = ''
ignoredStreamList = ['Stream1','Stream2','StreamBS','StreamBSFileOutput']
if foundStreamRD0:
# for old releases where StreamRDO was an algorithm
ignoredStreamList += ['StreamRDO']
if foundStreamESD:
# for streamESD defined as an algorithm
ignoredStreamList += ['StreamESD']
if foundStreamAOD:
# for streamAOD defined as an algorithm
ignoredStreamList += ['StreamAOD']
desdStreams = {}
try:
metaStreams = []
for genStream in theApp._streams.getAllChildren()+AlgSequence().getAllChildren():
# check name
fullName = genStream.getFullName()
if (fullName.split('/')[0] == 'AthenaOutputStream' or fullName.split('/')[0] == 'Athena::RootNtupleOutputStream') \
and (not fullName.split('/')[-1] in ignoredStreamList):
if hasattr(genStream,'OutputFile') and hasattr(genStream.OutputFile,'__len__') and len(genStream.OutputFile):
if (hasattr(genStream,'Enable') and genStream.Enable) or (not hasattr(genStream,'Enable')):
# keep meta data
if genStream.OutputFile.startswith("ROOTTREE:") or \
(hasattr(genStream,'WriteOnFinalize') and genStream.WriteOnFinalize):
metaStreams.append(genStream)
elif fullName.split('/')[-1].startswith('StreamDESD'):
# ignore StreamDESD to treat it as multiple-streams later
continue
else:
strGenStream += '%s:%s,' % (fullName.split('/')[-1],genStream.OutputFile)
streamOutputFiles[fullName.split('/')[-1]] = genStream.OutputFile
strGenFName = genStream.OutputFile
ignoreMetaFiles.append(genStream.OutputFile)
# associate meta stream
for mStream in metaStreams:
metaOutName = mStream.OutputFile.split(':')[-1]
assStream = None
# look for associated stream
for stName in streamOutputFiles:
stOut = streamOutputFiles[stName]
if metaOutName == stOut:
assStream = stName
break
# ignore meta stream since renaming is used instead of changing jobO
if metaOutName in ignoreMetaFiles:
continue
# print meta stream
if assStream is not None:
_printConfig('Output=META %s %s' % (mStream.getFullName().split('/')[1],assStream))
_printConfig(' Name: %s'% metaOutName)
except Exception:
pass
if strGenStream != '':
strGenStream = strGenStream[:-1]
_printConfig('Output=STREAMG %s' % strGenStream)
_printConfig(' Name: %s'% strGenFName)
if desdStreams != {}:
for tmpStreamName in desdStreams:
tmpOutFileName = desdStreams[tmpStreamName]
_printConfig('Output=DESD %s' % tmpStreamName)
_printConfig(' Name: %s'% tmpOutFileName)
# THIST
userDataSvcStream = {}
usedTHistStreams = []
THistSvc = _Service( "THistSvc" )
if hasattr(THistSvc.Output,'__len__') and len(THistSvc.Output):
for item in THistSvc.Output:
sName = item.split()[0]
if sName not in aantStream:
# extract name
fmatch = re.search("DATAFILE=(\S+)\s",item)
fName = None
if fmatch is not None:
fName = fmatch.group(1)
fName = re.sub('[\"\']','',fName)
fName = fName.split('/')[-1]
# keep output of UserDataSvc
if sName in ['userdataoutputstream'] or sName.startswith('userdataoutputstream'):
userDataSvcStream[sName] = fName
continue
# skip if defined in StreamG
if strGenFName != '' and fName == strGenFName:
continue
_printConfig('Output=THIST %s' % sName)
if fmatch is not None:
_printConfig(' Name: %s'% fName)
# ROOT outputs for interactive Athena
import ROOT
fList = ROOT.gROOT.GetListOfFiles()
for index in range(fList.GetSize()):
if fList[index].GetOption() == 'CREATE':
_printConfig('Output=IROOT %s' % fList[index].GetName())
_printConfig(' Name: %s'% fList[index].GetName())
# BS
ByteStreamCnvSvc = _Service("ByteStreamCnvSvc")
if hasattr(ByteStreamCnvSvc,'ByteStreamOutputSvc') and \
ByteStreamCnvSvc.ByteStreamOutputSvc=="ByteStreamEventStorageOutputSvc":
_printConfig('Output=BS')
elif hasattr(ByteStreamCnvSvc,'ByteStreamOutputSvcList') and \
'ByteStreamEventStorageOutputSvc' in ByteStreamCnvSvc.ByteStreamOutputSvcList:
_printConfig('Output=BS')
# selected BS
BSESOutputSvc = _Service("BSESOutputSvc")
if hasattr(BSESOutputSvc,'SimpleFileName'):
_printConfig('Output=SelBS %s' % BSESOutputSvc.SimpleFileName)
_printConfig(' Name: %s'% BSESOutputSvc.SimpleFileName)
# MultipleStream
try:
from OutputStreamAthenaPool.MultipleStreamManager import MSMgr
for tmpStream in MSMgr.StreamList:
# avoid duplication
if not tmpStream.Name in streamOutputFiles.keys():
# remove prefix
tmpFileBaseName = tmpStream.Stream.OutputFile.split(':')[-1]
_printConfig('Output=MS %s %s' % (tmpStream.Name,tmpFileBaseName))
_printConfig(' Name: %s'% tmpFileBaseName)
except Exception:
pass
# UserDataSvc
if userDataSvcStream != {}:
for userStName in userDataSvcStream:
userFileName = userDataSvcStream[userStName]
findStream = False
# look for associated stream
for stName in streamOutputFiles:
stOut = streamOutputFiles[stName]
if userFileName == stOut:
_printConfig('Output=USERDATA %s' % stName)
findStream = True
break
# use THIST if not found
if not findStream:
_printConfig('Output=THIST %s' % userStName)
_printConfig(' Name: %s'% userFileName)
######################
# random number
AtRndmGenSvc = _Service( "AtRndmGenSvc" )
if hasattr(AtRndmGenSvc,'Seeds') and hasattr(AtRndmGenSvc.Seeds,'__len__') and len(AtRndmGenSvc.Seeds):
# random seeds
for item in AtRndmGenSvc.Seeds:
_printConfig('RndmStream %s' % item.split()[0])
import types
if hasattr(AtRndmGenSvc,'ReadFromFile') and isinstance(AtRndmGenSvc.ReadFromFile,types.BooleanType) and AtRndmGenSvc.ReadFromFile:
# read from file
rndFileName = "AtRndmGenSvc.out"
if hasattr(AtRndmGenSvc.FileToRead,'__len__') and len(AtRndmGenSvc.FileToRead):
rndFileName = AtRndmGenSvc.FileToRead
_printConfig('RndmGenFile %s' % rndFileName)
# G4 random seed
try:
if hasattr(SimFlags,'SeedsG4'):
_printConfig('G4RandomSeeds')
except Exception:
pass
| [
"re.search",
"AthenaCommon.AlgSequence.AlgSequence",
"ROOT.gROOT.GetListOfFiles",
"re.sub"
] | [((17136, 17163), 'ROOT.gROOT.GetListOfFiles', 'ROOT.gROOT.GetListOfFiles', ([], {}), '()\n', (17161, 17163), False, 'import ROOT\n'), ((4625, 4638), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (4636, 4638), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((7126, 7163), 're.search', 're.search', (['"""(\\\\S+)\\\\s+DATAFILE"""', 'item'], {}), "('(\\\\S+)\\\\s+DATAFILE', item)\n", (7135, 7163), False, 'import re\n'), ((6313, 6326), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (6324, 6326), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((7328, 7365), 're.search', 're.search', (['"""DATAFILE=(\\\\S+)\\\\s"""', 'item'], {}), "('DATAFILE=(\\\\S+)\\\\s', item)\n", (7337, 7365), False, 'import re\n'), ((16377, 16414), 're.search', 're.search', (['"""DATAFILE=(\\\\S+)\\\\s"""', 'item'], {}), "('DATAFILE=(\\\\S+)\\\\s', item)\n", (16386, 16414), False, 'import re\n'), ((4549, 4562), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (4560, 4562), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((4877, 4890), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (4888, 4890), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((7462, 7488), 're.sub', 're.sub', (['"""["\']"""', '""""""', 'fName'], {}), '(\'["\\\']\', \'\', fName)\n', (7468, 7488), False, 'import re\n'), ((13648, 13661), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (13659, 13661), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((16536, 16562), 're.sub', 're.sub', (['"""["\']"""', '""""""', 'fName'], {}), '(\'["\\\']\', \'\', fName)\n', (16542, 16562), False, 'import re\n'), ((4759, 4772), 'AthenaCommon.AlgSequence.AlgSequence', 'AlgSequence', ([], {}), '()\n', (4770, 4772), False, 'from AthenaCommon.AlgSequence import AlgSequence\n'), ((10983, 11005), 're.search', 're.search', (['fName', 'item'], {}), '(fName, 
item)\n', (10992, 11005), False, 'import re\n')] |
# Project: File Volume Indexer
# Author: <NAME>
# Date Started: February 28, 2019
# Copyright: (c) Copyright 2019 <NAME>
# Module: FrameScroller
# Purpose: View for managing scans of volumes and sub volumes.
# Development:
# Instructions for use:
# Since the content of a scrollableFrame must have the scrollable Frame as its parent, the scrollable
# Frame must be obtained from the scroller Frame and the pacing or gridding of the content Frame
# makes it visible in the scroller Frame. This is categorically outside of the control of this module,
# unless the user passes a json frame definition in and it is created here. If so, then the user must
# still retrieve the constructed frame to make it the parent of the components it contains. They could
# also pass in an entire json framem definition including all components, in which case this module
# will have a method to obtain reverences to any of the components constructed by passing their names in.
# The standard name path can be used for the name to make this entirely general for complex nesting structures
# with repeated component names at different levels.
#
# 2021-08-25:
# Copied from VolumeIndexer project.
#
from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, \
BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT
class FrameScroller(LabelFrame):
def __init__(self, container, name: str, **keyWordArguments):
LabelFrame.__init__(self, container, name=name)
if "minimize" in keyWordArguments and isinstance( keyWordArguments["minimize"], bool ) and keyWordArguments["minimize"]:
self.minimize = keyWordArguments["minimize"]
self.stretch = False
else:
self.stretch = True
self.minimize = False
self.textScroller = Text(self, name="textScroller")
self.scrollerFrame = Frame(self.textScroller, name="scrollerFrame")
self.textScroller.window_create(INSERT, window=self.scrollerFrame, stretch=self.stretch, align=BOTTOM)
self.scrollbarVert = Scrollbar(self, name="scrollbarVert", orient=VERTICAL)
self.scrollbarHorz = Scrollbar(self, name="scrollbarHorz", orient=HORIZONTAL)
#self.scrollbarHorz.pack(side=BOTTOM, fil=X, anchor=W)
self.scrollbarHorz.pack(side=BOTTOM, anchor=W, fill=X)
self.scrollbarVert.pack(side=RIGHT, fill=Y)
self.textScroller.config(yscrollcommand=self.scrollbarVert.set)
self.textScroller.config(xscrollcommand=self.scrollbarHorz.set)
self.scrollbarVert.config(command=self.textScroller.yview)
self.scrollbarHorz.config(command=self.textScroller.xview)
if self.minimize:
self.textScroller.pack()
else:
self.textScroller.pack(fill=BOTH, expand=True)
def getScrollerFrame(self):
return self.scrollerFrame
if __name__ == "__main__":
print("FrameScroller running")
mainView = Tk()
mainView.geometry("300x400+300+100")
frameScroller = FrameScroller(mainView, "frameScroller")
label = Label(frameScroller.getScrollerFrame(), name="label", width=100, text='Since the content of a scrollableFrame must have the scrollable Frame as its parent, the scrollable Frame must be obtained from the scroller Frame and the pacing or gridding of the content Frame makes it visible in the scroller Frame. This is categorically outside of the control of this module,')
label.pack()
frameScroller.pack()
mainView.mainloop() | [
"tkinter.Text",
"tkinter.LabelFrame.__init__",
"tkinter.Scrollbar",
"tkinter.Frame",
"tkinter.Tk"
] | [((3122, 3126), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (3124, 3126), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n'), ((1595, 1642), 'tkinter.LabelFrame.__init__', 'LabelFrame.__init__', (['self', 'container'], {'name': 'name'}), '(self, container, name=name)\n', (1614, 1642), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n'), ((1987, 2018), 'tkinter.Text', 'Text', (['self'], {'name': '"""textScroller"""'}), "(self, name='textScroller')\n", (1991, 2018), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n'), ((2053, 2099), 'tkinter.Frame', 'Frame', (['self.textScroller'], {'name': '"""scrollerFrame"""'}), "(self.textScroller, name='scrollerFrame')\n", (2058, 2099), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n'), ((2242, 2296), 'tkinter.Scrollbar', 'Scrollbar', (['self'], {'name': '"""scrollbarVert"""', 'orient': 'VERTICAL'}), "(self, name='scrollbarVert', orient=VERTICAL)\n", (2251, 2296), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n'), ((2326, 2382), 'tkinter.Scrollbar', 'Scrollbar', (['self'], {'name': '"""scrollbarHorz"""', 'orient': 'HORIZONTAL'}), "(self, name='scrollbarHorz', orient=HORIZONTAL)\n", (2335, 2382), False, 'from tkinter import Frame, LabelFrame, Tk, Text, Scrollbar, Label, BOTTOM, W, RIGHT, X, Y, VERTICAL, HORIZONTAL, BOTH, INSERT\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for the API /deploy_templates/ methods.
"""
import datetime
from http import client as http_client
from unittest import mock
from urllib import parse as urlparse
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import notification_utils
from ironic.common import exception
from ironic import objects
from ironic.objects import fields as obj_fields
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as test_api_utils
from ironic.tests.unit.objects import utils as obj_utils
def _obj_to_api_step(obj_step):
"""Convert a deploy step in 'object' form to one in 'API' form."""
return {
'interface': obj_step['interface'],
'step': obj_step['step'],
'args': obj_step['args'],
'priority': obj_step['priority'],
}
class BaseDeployTemplatesAPITest(test_api_base.BaseApiTest):
headers = {api_base.Version.string: str(api_v1.max_version())}
invalid_version_headers = {api_base.Version.string: '1.54'}
class TestListDeployTemplates(BaseDeployTemplatesAPITest):
def test_empty(self):
data = self.get_json('/deploy_templates', headers=self.headers)
self.assertEqual([], data['deploy_templates'])
def test_one(self):
template = obj_utils.create_test_deploy_template(self.context)
data = self.get_json('/deploy_templates', headers=self.headers)
self.assertEqual(1, len(data['deploy_templates']))
self.assertEqual(template.uuid, data['deploy_templates'][0]['uuid'])
self.assertEqual(template.name, data['deploy_templates'][0]['name'])
self.assertNotIn('steps', data['deploy_templates'][0])
self.assertNotIn('extra', data['deploy_templates'][0])
def test_get_one(self):
template = obj_utils.create_test_deploy_template(self.context)
data = self.get_json('/deploy_templates/%s' % template.uuid,
headers=self.headers)
self.assertEqual(template.uuid, data['uuid'])
self.assertEqual(template.name, data['name'])
self.assertEqual(template.extra, data['extra'])
for t_dict_step, t_step in zip(data['steps'], template.steps):
self.assertEqual(t_dict_step['interface'], t_step['interface'])
self.assertEqual(t_dict_step['step'], t_step['step'])
self.assertEqual(t_dict_step['args'], t_step['args'])
self.assertEqual(t_dict_step['priority'], t_step['priority'])
def test_get_one_with_json(self):
template = obj_utils.create_test_deploy_template(self.context)
data = self.get_json('/deploy_templates/%s.json' % template.uuid,
headers=self.headers)
self.assertEqual(template.uuid, data['uuid'])
def test_get_one_with_suffix(self):
template = obj_utils.create_test_deploy_template(self.context,
name='CUSTOM_DT1')
data = self.get_json('/deploy_templates/%s' % template.uuid,
headers=self.headers)
self.assertEqual(template.uuid, data['uuid'])
def test_get_one_custom_fields(self):
template = obj_utils.create_test_deploy_template(self.context)
fields = 'name,steps'
data = self.get_json(
'/deploy_templates/%s?fields=%s' % (template.uuid, fields),
headers=self.headers)
# We always append "links"
self.assertCountEqual(['name', 'steps', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,steps'
for i in range(3):
obj_utils.create_test_deploy_template(
self.context,
uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % i)
data = self.get_json(
'/deploy_templates?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['deploy_templates']))
for template in data['deploy_templates']:
# We always append "links"
self.assertCountEqual(['uuid', 'steps', 'links'], template)
def test_get_custom_fields_invalid_fields(self):
template = obj_utils.create_test_deploy_template(self.context)
fields = 'uuid,spongebob'
response = self.get_json(
'/deploy_templates/%s?fields=%s' % (template.uuid, fields),
headers=self.headers, expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('spongebob', response.json['error_message'])
def test_get_all_invalid_api_version(self):
obj_utils.create_test_deploy_template(self.context)
response = self.get_json('/deploy_templates',
headers=self.invalid_version_headers,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_get_one_invalid_api_version(self):
template = obj_utils.create_test_deploy_template(self.context)
response = self.get_json(
'/deploy_templates/%s' % (template.uuid),
headers=self.invalid_version_headers,
expect_errors=True)
self.assertEqual(http_client.NOT_FOUND, response.status_int)
def test_detail_query(self):
template = obj_utils.create_test_deploy_template(self.context)
data = self.get_json('/deploy_templates?detail=True',
headers=self.headers)
self.assertEqual(template.uuid, data['deploy_templates'][0]['uuid'])
self.assertIn('name', data['deploy_templates'][0])
self.assertIn('steps', data['deploy_templates'][0])
self.assertIn('extra', data['deploy_templates'][0])
def test_detail_query_false(self):
obj_utils.create_test_deploy_template(self.context)
data1 = self.get_json('/deploy_templates', headers=self.headers)
data2 = self.get_json(
'/deploy_templates?detail=False', headers=self.headers)
self.assertEqual(data1['deploy_templates'], data2['deploy_templates'])
def test_detail_using_query_false_and_fields(self):
obj_utils.create_test_deploy_template(self.context)
data = self.get_json(
'/deploy_templates?detail=False&fields=steps',
headers=self.headers)
self.assertIn('steps', data['deploy_templates'][0])
self.assertNotIn('uuid', data['deploy_templates'][0])
self.assertNotIn('extra', data['deploy_templates'][0])
def test_detail_using_query_and_fields(self):
obj_utils.create_test_deploy_template(self.context)
response = self.get_json(
'/deploy_templates?detail=True&fields=name', headers=self.headers,
expect_errors=True)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
def test_many(self):
templates = []
for id_ in range(5):
template = obj_utils.create_test_deploy_template(
self.context, uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
templates.append(template.uuid)
data = self.get_json('/deploy_templates', headers=self.headers)
self.assertEqual(len(templates), len(data['deploy_templates']))
uuids = [n['uuid'] for n in data['deploy_templates']]
self.assertCountEqual(templates, uuids)
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_deploy_template(self.context, uuid=uuid)
data = self.get_json('/deploy_templates/%s' % uuid,
headers=self.headers)
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
for link in data['links']:
bookmark = link['rel'] == 'bookmark'
self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
templates = []
for id_ in range(5):
template = obj_utils.create_test_deploy_template(
self.context, uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
templates.append(template.uuid)
data = self.get_json('/deploy_templates/?limit=3',
headers=self.headers)
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
templates = []
for id_ in range(5):
template = obj_utils.create_test_deploy_template(
self.context, uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
templates.append(template.uuid)
data = self.get_json('/deploy_templates', headers=self.headers)
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
self.assertIn(next_marker, data['next'])
def test_collection_links_custom_fields(self):
cfg.CONF.set_override('max_limit', 3, 'api')
templates = []
fields = 'uuid,steps'
for i in range(5):
template = obj_utils.create_test_deploy_template(
self.context,
uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % i)
templates.append(template.uuid)
data = self.get_json('/deploy_templates?fields=%s' % fields,
headers=self.headers)
self.assertEqual(3, len(data['deploy_templates']))
next_marker = data['deploy_templates'][-1]['uuid']
self.assertIn(next_marker, data['next'])
self.assertIn('fields', data['next'])
def test_get_collection_pagination_no_uuid(self):
fields = 'name'
limit = 2
templates = []
for id_ in range(3):
template = obj_utils.create_test_deploy_template(
self.context,
uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
templates.append(template)
data = self.get_json(
'/deploy_templates?fields=%s&limit=%s' % (fields, limit),
headers=self.headers)
self.assertEqual(limit, len(data['deploy_templates']))
self.assertIn('marker=%s' % templates[limit - 1].uuid, data['next'])
def test_sort_key(self):
templates = []
for id_ in range(3):
template = obj_utils.create_test_deploy_template(
self.context,
uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
templates.append(template.uuid)
data = self.get_json('/deploy_templates?sort_key=uuid',
headers=self.headers)
uuids = [n['uuid'] for n in data['deploy_templates']]
self.assertEqual(sorted(templates), uuids)
def test_sort_key_invalid(self):
invalid_keys_list = ['extra', 'foo', 'steps']
for invalid_key in invalid_keys_list:
path = '/deploy_templates?sort_key=%s' % invalid_key
response = self.get_json(path, expect_errors=True,
headers=self.headers)
self.assertEqual(http_client.BAD_REQUEST, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(invalid_key, response.json['error_message'])
def _test_sort_key_allowed(self, detail=False):
template_uuids = []
for id_ in range(3, 0, -1):
template = obj_utils.create_test_deploy_template(
self.context,
uuid=uuidutils.generate_uuid(),
name='CUSTOM_DT%s' % id_)
template_uuids.append(template.uuid)
template_uuids.reverse()
url = '/deploy_templates?sort_key=name&detail=%s' % str(detail)
data = self.get_json(url, headers=self.headers)
data_uuids = [p['uuid'] for p in data['deploy_templates']]
self.assertEqual(template_uuids, data_uuids)
def test_sort_key_allowed(self):
self._test_sort_key_allowed()
def test_detail_sort_key_allowed(self):
self._test_sort_key_allowed(detail=True)
def test_sensitive_data_masked(self):
template = obj_utils.get_test_deploy_template(self.context)
template.steps[0]['args']['password'] = '<PASSWORD>'
template.create()
data = self.get_json('/deploy_templates/%s' % template.uuid,
headers=self.headers)
self.assertEqual("******", data['steps'][0]['args']['password'])
@mock.patch.object(objects.DeployTemplate, 'save', autospec=True)
class TestPatch(BaseDeployTemplatesAPITest):
    """Tests for PATCH /v1/deploy_templates/<ident> (JSON Patch updates).

    The class-level mock replaces DeployTemplate.save, so every test method
    receives a ``mock_save`` argument (innermost decorator argument first).
    """

    def setUp(self):
        super(TestPatch, self).setUp()
        # Each test starts with one pre-existing template to patch.
        self.template = obj_utils.create_test_deploy_template(
            self.context, name='CUSTOM_DT1')

    # Helper: apply a patch expected to succeed with HTTP 200; save() must
    # have been called exactly once.  Returns the response for extra checks.
    def _test_update_ok(self, mock_save, patch):
        response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
                                   patch, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        mock_save.assert_called_once_with(mock.ANY)
        return response

    # Helper: apply a patch expected to fail with HTTP 400 and (optionally)
    # a specific error message; save() must never be reached.
    def _test_update_bad_request(self, mock_save, patch, error_msg=None):
        response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
                                   patch, expect_errors=True,
                                   headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.BAD_REQUEST, response.status_code)
        self.assertTrue(response.json['error_message'])
        if error_msg:
            self.assertIn(error_msg, response.json['error_message'])
        self.assertFalse(mock_save.called)
        return response

    @mock.patch.object(notification_utils, '_emit_api_notification',
                       autospec=True)
    def test_update_by_id(self, mock_notify, mock_save):
        # A successful update emits START and END notifications.
        name = 'CUSTOM_DT2'
        patch = [{'path': '/name', 'value': name, 'op': 'add'}]
        response = self._test_update_ok(mock_save, patch)
        self.assertEqual(name, response.json['name'])
        mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.START),
                                      mock.call(mock.ANY, mock.ANY, 'update',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.END)])

    def test_update_by_name(self, mock_save):
        # Templates are addressable by name as well as by UUID.
        steps = [{
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {'foo': 'bar'},
            'priority': 42
        }]
        patch = [{'path': '/steps', 'value': steps, 'op': 'replace'}]
        response = self.patch_json('/deploy_templates/%s' % self.template.name,
                                   patch, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        mock_save.assert_called_once_with(mock.ANY)
        self.assertEqual(steps, response.json['steps'])

    def test_update_by_name_with_json(self, mock_save):
        # The '.json' suffix on the resource name is accepted.
        interface = 'bios'
        path = '/deploy_templates/%s.json' % self.template.name
        response = self.patch_json(path,
                                   [{'path': '/steps/0/interface',
                                     'value': interface,
                                     'op': 'replace'}],
                                   headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        self.assertEqual(interface, response.json['steps'][0]['interface'])

    def test_update_name_standard_trait(self, mock_save):
        # Standard (non-CUSTOM_) trait names are valid template names.
        name = 'HW_CPU_X86_VMX'
        patch = [{'path': '/name', 'value': name, 'op': 'replace'}]
        response = self._test_update_ok(mock_save, patch)
        self.assertEqual(name, response.json['name'])

    def test_update_name_custom_trait(self, mock_save):
        name = 'CUSTOM_DT2'
        patch = [{'path': '/name', 'value': name, 'op': 'replace'}]
        response = self._test_update_ok(mock_save, patch)
        self.assertEqual(name, response.json['name'])

    def test_update_invalid_name(self, mock_save):
        # Names must match the trait pattern; 'aa:bb_cc' does not.
        self._test_update_bad_request(
            mock_save,
            [{'path': '/name', 'value': 'aa:bb_cc', 'op': 'replace'}],
            "'aa:bb_cc' does not match '^CUSTOM_[A-Z0-9_]+$'")

    def test_update_by_id_invalid_api_version(self, mock_save):
        # PATCH is rejected (405) below the minimum supported microversion.
        name = 'CUSTOM_DT2'
        headers = self.invalid_version_headers
        response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
                                   [{'path': '/name',
                                     'value': name,
                                     'op': 'add'}],
                                   headers=headers,
                                   expect_errors=True)
        self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
        self.assertFalse(mock_save.called)

    def test_update_by_name_old_api_version(self, mock_save):
        # No version headers at all -> old default version -> 405.
        name = 'CUSTOM_DT2'
        response = self.patch_json('/deploy_templates/%s' % self.template.name,
                                   [{'path': '/name',
                                     'value': name,
                                     'op': 'add'}],
                                   expect_errors=True)
        self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
        self.assertFalse(mock_save.called)

    def test_update_not_found(self, mock_save):
        # Patching a non-existent UUID yields 404 without calling save().
        name = 'CUSTOM_DT2'
        uuid = uuidutils.generate_uuid()
        response = self.patch_json('/deploy_templates/%s' % uuid,
                                   [{'path': '/name',
                                     'value': name,
                                     'op': 'add'}],
                                   expect_errors=True,
                                   headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.NOT_FOUND, response.status_int)
        self.assertTrue(response.json['error_message'])
        self.assertFalse(mock_save.called)

    @mock.patch.object(notification_utils, '_emit_api_notification',
                       autospec=True)
    def test_replace_name_already_exist(self, mock_notify, mock_save):
        # Renaming to an already-used name conflicts (409) and emits an
        # ERROR notification after the START one.
        name = 'CUSTOM_DT2'
        obj_utils.create_test_deploy_template(self.context,
                                              uuid=uuidutils.generate_uuid(),
                                              name=name)
        mock_save.side_effect = exception.DeployTemplateAlreadyExists(
            uuid=self.template.uuid)
        response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
                                   [{'path': '/name',
                                     'value': name,
                                     'op': 'replace'}],
                                   expect_errors=True,
                                   headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.CONFLICT, response.status_code)
        self.assertTrue(response.json['error_message'])
        mock_save.assert_called_once_with(mock.ANY)
        mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.START),
                                      mock.call(mock.ANY, mock.ANY, 'update',
                                      obj_fields.NotificationLevel.ERROR,
                                      obj_fields.NotificationStatus.ERROR)])

    def test_replace_invalid_name_too_long(self, mock_save):
        # 256 characters exceeds the maximum name length.
        name = 'CUSTOM_' + 'X' * 249
        patch = [{'path': '/name', 'op': 'replace', 'value': name}]
        self._test_update_bad_request(
            mock_save, patch, "'%s' is too long" % name)

    def test_replace_invalid_name_not_a_trait(self, mock_save):
        name = 'not-a-trait'
        patch = [{'path': '/name', 'op': 'replace', 'value': name}]
        self._test_update_bad_request(
            mock_save, patch,
            "'not-a-trait' does not match '^CUSTOM_[A-Z0-9_]+$'")

    def test_replace_invalid_name_none(self, mock_save):
        patch = [{'path': '/name', 'op': 'replace', 'value': None}]
        self._test_update_bad_request(
            mock_save, patch, "None is not of type 'string'")

    def test_replace_duplicate_step(self, mock_save):
        # interface & step combination must be unique.
        steps = [
            {
                'interface': 'raid',
                'step': 'create_configuration',
                'args': {'foo': '%d' % i},
                'priority': i,
            }
            for i in range(2)
        ]
        patch = [{'path': '/steps', 'op': 'replace', 'value': steps}]
        self._test_update_bad_request(
            mock_save, patch, "Duplicate deploy steps")

    def test_replace_invalid_step_interface_fail(self, mock_save):
        # 'foo' is not a valid deploy interface name.
        step = {
            'interface': 'foo',
            'step': 'apply_configuration',
            'args': {'foo': 'bar'},
            'priority': 42
        }
        patch = [{'path': '/steps/0', 'op': 'replace', 'value': step}]
        self._test_update_bad_request(
            mock_save, patch, "'foo' is not one of")

    def test_replace_non_existent_step_fail(self, mock_save):
        # Index 1 is out of range: the template only has one step.
        step = {
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {'foo': 'bar'},
            'priority': 42
        }
        patch = [{'path': '/steps/1', 'op': 'replace', 'value': step}]
        self._test_update_bad_request(mock_save, patch)

    def test_replace_empty_step_list_fail(self, mock_save):
        # A template must always carry at least one step.
        patch = [{'path': '/steps', 'op': 'replace', 'value': []}]
        self._test_update_bad_request(
            mock_save, patch, '[] is too short')

    # Helper: removing the given top-level field must be rejected with 400.
    def _test_remove_not_allowed(self, mock_save, field, error_msg=None):
        patch = [{'path': '/%s' % field, 'op': 'remove'}]
        self._test_update_bad_request(mock_save, patch, error_msg)

    def test_remove_uuid(self, mock_save):
        self._test_remove_not_allowed(
            mock_save, 'uuid',
            "Cannot patch /uuid")

    def test_remove_name(self, mock_save):
        self._test_remove_not_allowed(
            mock_save, 'name',
            "'name' is a required property")

    def test_remove_steps(self, mock_save):
        self._test_remove_not_allowed(
            mock_save, 'steps',
            "'steps' is a required property")

    def test_remove_foo(self, mock_save):
        self._test_remove_not_allowed(mock_save, 'foo')

    def test_replace_step_invalid_interface(self, mock_save):
        patch = [{'path': '/steps/0/interface', 'op': 'replace',
                  'value': 'foo'}]
        self._test_update_bad_request(
            mock_save, patch, "'foo' is not one of")

    def test_replace_multi(self, mock_save):
        # Replace every step of a multi-step template in one patch.
        steps = [
            {
                'interface': 'raid',
                'step': 'create_configuration%d' % i,
                'args': {},
                'priority': 10,
            }
            for i in range(3)
        ]
        template = obj_utils.create_test_deploy_template(
            self.context, uuid=uuidutils.generate_uuid(), name='CUSTOM_DT2',
            steps=steps)
        # mutate steps so we replace all of them
        for step in steps:
            step['priority'] = step['priority'] + 1
        patch = []
        for i, step in enumerate(steps):
            patch.append({'path': '/steps/%s' % i,
                          'value': step,
                          'op': 'replace'})
        response = self.patch_json('/deploy_templates/%s' % template.uuid,
                                   patch, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        self.assertEqual(steps, response.json['steps'])
        mock_save.assert_called_once_with(mock.ANY)

    def test_remove_multi(self, mock_save):
        steps = [
            {
                'interface': 'raid',
                'step': 'create_configuration%d' % i,
                'args': {},
                'priority': 10,
            }
            for i in range(3)
        ]
        template = obj_utils.create_test_deploy_template(
            self.context, uuid=uuidutils.generate_uuid(), name='CUSTOM_DT2',
            steps=steps)
        # Removing one step from the collection
        steps.pop(1)
        response = self.patch_json('/deploy_templates/%s' % template.uuid,
                                   [{'path': '/steps/1',
                                     'op': 'remove'}],
                                   headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        self.assertEqual(steps, response.json['steps'])
        mock_save.assert_called_once_with(mock.ANY)

    def test_remove_non_existent_property_fail(self, mock_save):
        patch = [{'path': '/non-existent', 'op': 'remove'}]
        self._test_update_bad_request(mock_save, patch)

    def test_remove_non_existent_step_fail(self, mock_save):
        patch = [{'path': '/steps/1', 'op': 'remove'}]
        self._test_update_bad_request(mock_save, patch)

    def test_remove_only_step_fail(self, mock_save):
        # Removing the last remaining step would leave an empty step list.
        patch = [{'path': '/steps/0', 'op': 'remove'}]
        self._test_update_bad_request(
            mock_save, patch, "[] is too short")

    def test_remove_non_existent_step_property_fail(self, mock_save):
        patch = [{'path': '/steps/0/non-existent', 'op': 'remove'}]
        self._test_update_bad_request(mock_save, patch)

    def test_add_root_non_existent(self, mock_save):
        # Unknown top-level fields cannot be added.
        patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
        self._test_update_bad_request(
            mock_save, patch,
            "Cannot patch /foo")

    def test_add_too_high_index_step_fail(self, mock_save):
        # Adding at index 2 when only one step exists is out of range.
        step = {
            'interface': 'bios',
            'step': 'apply_configuration',
            'args': {'foo': 'bar'},
            'priority': 42
        }
        patch = [{'path': '/steps/2', 'op': 'add', 'value': step}]
        self._test_update_bad_request(mock_save, patch)

    def test_add_multi(self, mock_save):
        # Adding at successive indices prepends; the original step ends last.
        steps = [
            {
                'interface': 'raid',
                'step': 'create_configuration%d' % i,
                'args': {},
                'priority': 10,
            }
            for i in range(3)
        ]
        patch = []
        for i, step in enumerate(steps):
            patch.append({'path': '/steps/%d' % i,
                          'value': step,
                          'op': 'add'})
        response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
                                   patch, headers=self.headers)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(http_client.OK, response.status_code)
        self.assertEqual(steps, response.json['steps'][:-1])
        self.assertEqual(_obj_to_api_step(self.template.steps[0]),
                         response.json['steps'][-1])
        mock_save.assert_called_once_with(mock.ANY)
class TestPost(BaseDeployTemplatesAPITest):
    """Tests for POST /v1/deploy_templates (template creation)."""

    @mock.patch.object(notification_utils, '_emit_api_notification',
                       autospec=True)
    @mock.patch.object(timeutils, 'utcnow', autospec=True)
    def test_create(self, mock_utcnow, mock_notify):
        # Happy path: 201, correct Location header, created_at frozen to
        # the mocked clock, and START/END notifications emitted.
        tdict = test_api_utils.post_get_test_deploy_template()
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        response = self.post_json('/deploy_templates', tdict,
                                  headers=self.headers)
        self.assertEqual(http_client.CREATED, response.status_int)
        result = self.get_json('/deploy_templates/%s' % tdict['uuid'],
                               headers=self.headers)
        self.assertEqual(tdict['uuid'], result['uuid'])
        self.assertFalse(result['updated_at'])
        return_created_at = timeutils.parse_isotime(
            result['created_at']).replace(tzinfo=None)
        self.assertEqual(test_time, return_created_at)
        # Check location header
        self.assertIsNotNone(response.location)
        expected_location = '/v1/deploy_templates/%s' % tdict['uuid']
        self.assertEqual(expected_location,
                         urlparse.urlparse(response.location).path)
        mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.START),
                                      mock.call(mock.ANY, mock.ANY, 'create',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.END)])

    def test_create_invalid_api_version(self):
        # POST is rejected (405) below the minimum supported microversion.
        tdict = test_api_utils.post_get_test_deploy_template()
        response = self.post_json(
            '/deploy_templates', tdict, headers=self.invalid_version_headers,
            expect_errors=True)
        self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)

    def test_create_doesnt_contain_id(self):
        # The API layer must not pass a database 'id' down to the DB API.
        with mock.patch.object(
                self.dbapi, 'create_deploy_template',
                wraps=self.dbapi.create_deploy_template) as mock_create:
            tdict = test_api_utils.post_get_test_deploy_template()
            self.post_json('/deploy_templates', tdict, headers=self.headers)
            self.get_json('/deploy_templates/%s' % tdict['uuid'],
                          headers=self.headers)
            mock_create.assert_called_once_with(mock.ANY)
            # Check that 'id' is not in first arg of positional args
            self.assertNotIn('id', mock_create.call_args[0][0])

    @mock.patch.object(notification_utils.LOG, 'exception', autospec=True)
    @mock.patch.object(notification_utils.LOG, 'warning', autospec=True)
    def test_create_generate_uuid(self, mock_warn, mock_except):
        # Omitting 'uuid' makes the server generate one, without warnings.
        tdict = test_api_utils.post_get_test_deploy_template()
        del tdict['uuid']
        response = self.post_json('/deploy_templates', tdict,
                                  headers=self.headers)
        result = self.get_json('/deploy_templates/%s' % response.json['uuid'],
                               headers=self.headers)
        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
        self.assertFalse(mock_warn.called)
        self.assertFalse(mock_except.called)

    @mock.patch.object(notification_utils, '_emit_api_notification',
                       autospec=True)
    @mock.patch.object(objects.DeployTemplate, 'create', autospec=True)
    def test_create_error(self, mock_create, mock_notify):
        # A failure during create() emits START then ERROR notifications.
        mock_create.side_effect = Exception()
        tdict = test_api_utils.post_get_test_deploy_template()
        self.post_json('/deploy_templates', tdict, headers=self.headers,
                       expect_errors=True)
        mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create',
                                      obj_fields.NotificationLevel.INFO,
                                      obj_fields.NotificationStatus.START),
                                      mock.call(mock.ANY, mock.ANY, 'create',
                                      obj_fields.NotificationLevel.ERROR,
                                      obj_fields.NotificationStatus.ERROR)])

    # Helper: posting the given body must succeed with HTTP 201.
    def _test_create_ok(self, tdict):
        response = self.post_json('/deploy_templates', tdict,
                                  headers=self.headers)
        self.assertEqual(http_client.CREATED, response.status_int)

    # Helper: posting the given body must fail with HTTP 400 and include
    # error_msg in the returned error message.
    def _test_create_bad_request(self, tdict, error_msg):
        response = self.post_json('/deploy_templates', tdict,
                                  expect_errors=True, headers=self.headers)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])
        self.assertIn(error_msg, response.json['error_message'])

    def test_create_long_name(self):
        # 255 characters is the maximum accepted name length.
        name = 'CUSTOM_' + 'X' * 248
        tdict = test_api_utils.post_get_test_deploy_template(name=name)
        self._test_create_ok(tdict)

    def test_create_standard_trait_name(self):
        name = 'HW_CPU_X86_VMX'
        tdict = test_api_utils.post_get_test_deploy_template(name=name)
        self._test_create_ok(tdict)

    def test_create_name_invalid_too_long(self):
        # 256 characters exceeds the maximum name length.
        name = 'CUSTOM_' + 'X' * 249
        tdict = test_api_utils.post_get_test_deploy_template(name=name)
        self._test_create_bad_request(
            tdict, "'%s' is too long" % name)

    def test_create_name_invalid_not_a_trait(self):
        name = 'not-a-trait'
        tdict = test_api_utils.post_get_test_deploy_template(name=name)
        self._test_create_bad_request(
            tdict, "'not-a-trait' does not match '^CUSTOM_[A-Z0-9_]+$'")

    def test_create_steps_invalid_duplicate(self):
        # Two steps with the same interface & step name are rejected.
        steps = [
            {
                'interface': 'raid',
                'step': 'create_configuration',
                'args': {'foo': '%d' % i},
                'priority': i,
            }
            for i in range(2)
        ]
        tdict = test_api_utils.post_get_test_deploy_template(steps=steps)
        self._test_create_bad_request(tdict, "Duplicate deploy steps")

    # Helper: omitting a mandatory top-level field must be rejected.
    def _test_create_no_mandatory_field(self, field):
        tdict = test_api_utils.post_get_test_deploy_template()
        del tdict[field]
        self._test_create_bad_request(tdict, "is a required property")

    def test_create_no_mandatory_field_name(self):
        self._test_create_no_mandatory_field('name')

    def test_create_no_mandatory_field_steps(self):
        self._test_create_no_mandatory_field('steps')

    # Helper: omitting a mandatory per-step field must be rejected.
    def _test_create_no_mandatory_step_field(self, field):
        tdict = test_api_utils.post_get_test_deploy_template()
        del tdict['steps'][0][field]
        self._test_create_bad_request(tdict, "is a required property")

    def test_create_no_mandatory_step_field_interface(self):
        self._test_create_no_mandatory_step_field('interface')

    def test_create_no_mandatory_step_field_step(self):
        self._test_create_no_mandatory_step_field('step')

    def test_create_no_mandatory_step_field_args(self):
        self._test_create_no_mandatory_step_field('args')

    def test_create_no_mandatory_step_field_priority(self):
        self._test_create_no_mandatory_step_field('priority')

    # Helper: setting a top-level field to an invalid value must fail with
    # the given schema-validation error message.
    def _test_create_invalid_field(self, field, value, error_msg):
        tdict = test_api_utils.post_get_test_deploy_template()
        tdict[field] = value
        self._test_create_bad_request(tdict, error_msg)

    def test_create_invalid_field_name(self):
        self._test_create_invalid_field(
            'name', 42, "42 is not of type 'string'")

    def test_create_invalid_field_name_none(self):
        self._test_create_invalid_field(
            'name', None, "None is not of type 'string'")

    def test_create_invalid_field_steps(self):
        self._test_create_invalid_field(
            'steps', {}, "{} is not of type 'array'")

    def test_create_invalid_field_empty_steps(self):
        self._test_create_invalid_field(
            'steps', [], "[] is too short")

    def test_create_invalid_field_extra(self):
        self._test_create_invalid_field(
            'extra', 42, "42 is not of type 'object'")

    def test_create_invalid_field_foo(self):
        self._test_create_invalid_field(
            'foo', 'bar',
            "Additional properties are not allowed ('foo' was unexpected)")

    # Helper: setting a per-step field to an invalid value must fail; a
    # generic "Deploy template invalid" message is expected by default.
    def _test_create_invalid_step_field(self, field, value, error_msg=None):
        tdict = test_api_utils.post_get_test_deploy_template()
        tdict['steps'][0][field] = value
        if error_msg is None:
            error_msg = "Deploy template invalid: "
        self._test_create_bad_request(tdict, error_msg)

    def test_create_invalid_step_field_interface1(self):
        self._test_create_invalid_step_field(
            'interface', [3], "[3] is not of type 'string'")

    def test_create_invalid_step_field_interface2(self):
        self._test_create_invalid_step_field(
            'interface', 'foo', "'foo' is not one of")

    def test_create_invalid_step_field_step(self):
        self._test_create_invalid_step_field(
            'step', 42, "42 is not of type 'string'")

    def test_create_invalid_step_field_args1(self):
        self._test_create_invalid_step_field(
            'args', 'not a dict', "'not a dict' is not of type 'object'")

    def test_create_invalid_step_field_args2(self):
        self._test_create_invalid_step_field(
            'args', [], "[] is not of type 'object'")

    def test_create_invalid_step_field_priority(self):
        self._test_create_invalid_step_field(
            'priority', 'not a number',
            "'not a number' is not of type 'integer'")

    def test_create_invalid_step_field_negative_priority(self):
        self._test_create_invalid_step_field(
            'priority', -1, "-1 is less than the minimum of 0")

    def test_create_invalid_step_field_foo(self):
        self._test_create_invalid_step_field(
            'foo', 'bar',
            "Additional properties are not allowed ('foo' was unexpected)")

    def test_create_step_string_priority(self):
        # A numeric string priority is coerced and accepted.
        tdict = test_api_utils.post_get_test_deploy_template()
        tdict['steps'][0]['priority'] = '42'
        self._test_create_ok(tdict)

    def test_create_complex_step_args(self):
        # Nested structures are allowed in step args.
        tdict = test_api_utils.post_get_test_deploy_template()
        tdict['steps'][0]['args'] = {'foo': [{'bar': 'baz'}]}
        self._test_create_ok(tdict)
@mock.patch.object(objects.DeployTemplate, 'destroy', autospec=True)
class TestDelete(BaseDeployTemplatesAPITest):
    """Tests for DELETE /v1/deploy_templates/<ident>.

    The class-level mock replaces DeployTemplate.destroy, so every test
    method receives the mock as its last injected argument.
    """

    def setUp(self):
        super(TestDelete, self).setUp()
        self.template = obj_utils.create_test_deploy_template(self.context)

    def _template_url(self, ident, suffix=''):
        # Build the resource URL for a template identifier (uuid or name).
        return '/deploy_templates/%s%s' % (ident, suffix)

    @mock.patch.object(notification_utils, '_emit_api_notification',
                       autospec=True)
    def test_delete_by_uuid(self, mock_notify, mock_destroy):
        """Deleting by UUID destroys the object and notifies START/END."""
        self.delete(self._template_url(self.template.uuid),
                    headers=self.headers)
        mock_destroy.assert_called_once_with(mock.ANY)
        expected_calls = [
            mock.call(mock.ANY, mock.ANY, 'delete',
                      obj_fields.NotificationLevel.INFO,
                      obj_fields.NotificationStatus.START),
            mock.call(mock.ANY, mock.ANY, 'delete',
                      obj_fields.NotificationLevel.INFO,
                      obj_fields.NotificationStatus.END),
        ]
        mock_notify.assert_has_calls(expected_calls)

    def test_delete_by_uuid_with_json(self, mock_destroy):
        """The '.json' suffix is accepted when deleting by UUID."""
        self.delete(self._template_url(self.template.uuid, '.json'),
                    headers=self.headers)
        mock_destroy.assert_called_once_with(mock.ANY)

    def test_delete_by_name(self, mock_destroy):
        """Templates can be deleted by name as well as by UUID."""
        self.delete(self._template_url(self.template.name),
                    headers=self.headers)
        mock_destroy.assert_called_once_with(mock.ANY)

    def test_delete_by_name_with_json(self, mock_destroy):
        """The '.json' suffix is accepted when deleting by name."""
        self.delete(self._template_url(self.template.name, '.json'),
                    headers=self.headers)
        mock_destroy.assert_called_once_with(mock.ANY)

    def test_delete_invalid_api_version(self, mock_dpt):
        """Requests below the minimum microversion get 405."""
        response = self.delete(self._template_url(self.template.uuid),
                               expect_errors=True,
                               headers=self.invalid_version_headers)
        self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)

    def test_delete_old_api_version(self, mock_dpt):
        # Names like CUSTOM_1 were not valid in API 1.1, but the check should
        # go after the microversion check.
        response = self.delete(self._template_url(self.template.name),
                               expect_errors=True)
        self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)

    def test_delete_by_name_non_existent(self, mock_dpt):
        """Deleting an unknown name returns 404."""
        res = self.delete(self._template_url('blah'), expect_errors=True,
                          headers=self.headers)
        self.assertEqual(http_client.NOT_FOUND, res.status_code)
| [
"unittest.mock.patch.object",
"ironic.tests.unit.api.utils.post_get_test_deploy_template",
"ironic.common.exception.DeployTemplateAlreadyExists",
"ironic.api.controllers.v1.max_version",
"oslo_utils.uuidutils.generate_uuid",
"oslo_utils.timeutils.parse_isotime",
"ironic.tests.unit.objects.utils.create_t... | [((13665, 13729), 'unittest.mock.patch.object', 'mock.patch.object', (['objects.DeployTemplate', '"""save"""'], {'autospec': '(True)'}), "(objects.DeployTemplate, 'save', autospec=True)\n", (13682, 13729), False, 'from unittest import mock\n'), ((39676, 39743), 'unittest.mock.patch.object', 'mock.patch.object', (['objects.DeployTemplate', '"""destroy"""'], {'autospec': '(True)'}), "(objects.DeployTemplate, 'destroy', autospec=True)\n", (39693, 39743), False, 'from unittest import mock\n'), ((14979, 15057), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils', '"""_emit_api_notification"""'], {'autospec': '(True)'}), "(notification_utils, '_emit_api_notification', autospec=True)\n", (14996, 15057), False, 'from unittest import mock\n'), ((19689, 19767), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils', '"""_emit_api_notification"""'], {'autospec': '(True)'}), "(notification_utils, '_emit_api_notification', autospec=True)\n", (19706, 19767), False, 'from unittest import mock\n'), ((29036, 29114), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils', '"""_emit_api_notification"""'], {'autospec': '(True)'}), "(notification_utils, '_emit_api_notification', autospec=True)\n", (29053, 29114), False, 'from unittest import mock\n'), ((29143, 29196), 'unittest.mock.patch.object', 'mock.patch.object', (['timeutils', '"""utcnow"""'], {'autospec': '(True)'}), "(timeutils, 'utcnow', autospec=True)\n", (29160, 29196), False, 'from unittest import mock\n'), ((31698, 31767), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils.LOG', '"""exception"""'], {'autospec': '(True)'}), "(notification_utils.LOG, 'exception', autospec=True)\n", (31715, 31767), False, 'from unittest import mock\n'), ((31773, 31840), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils.LOG', '"""warning"""'], {'autospec': '(True)'}), "(notification_utils.LOG, 
'warning', autospec=True)\n", (31790, 31840), False, 'from unittest import mock\n'), ((32403, 32481), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils', '"""_emit_api_notification"""'], {'autospec': '(True)'}), "(notification_utils, '_emit_api_notification', autospec=True)\n", (32420, 32481), False, 'from unittest import mock\n'), ((32510, 32576), 'unittest.mock.patch.object', 'mock.patch.object', (['objects.DeployTemplate', '"""create"""'], {'autospec': '(True)'}), "(objects.DeployTemplate, 'create', autospec=True)\n", (32527, 32576), False, 'from unittest import mock\n'), ((39934, 40012), 'unittest.mock.patch.object', 'mock.patch.object', (['notification_utils', '"""_emit_api_notification"""'], {'autospec': '(True)'}), "(notification_utils, '_emit_api_notification', autospec=True)\n", (39951, 40012), False, 'from unittest import mock\n'), ((2011, 2062), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (2048, 2062), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((2522, 2573), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (2559, 2573), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((3269, 3320), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (3306, 3320), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((3560, 3630), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {'name': '"""CUSTOM_DT1"""'}), "(self.context, name='CUSTOM_DT1')\n", (3597, 3630), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((3924, 3975), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 
'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (3961, 3975), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((4928, 4979), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (4965, 4979), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((5437, 5488), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (5474, 5488), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((5804, 5855), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (5841, 5855), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((6148, 6199), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (6185, 6199), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((6617, 6668), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (6654, 6668), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((6985, 7036), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (7022, 7036), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((7404, 7455), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (7441, 7455), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((8257, 8282), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (8280, 
8282), False, 'from oslo_utils import uuidutils\n'), ((8291, 8353), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {'uuid': 'uuid'}), '(self.context, uuid=uuid)\n', (8328, 8353), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((9457, 9501), 'oslo_config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""max_limit"""', '(3)', '"""api"""'], {}), "('max_limit', 3, 'api')\n", (9478, 9501), False, 'from oslo_config import cfg\n'), ((10064, 10108), 'oslo_config.cfg.CONF.set_override', 'cfg.CONF.set_override', (['"""max_limit"""', '(3)', '"""api"""'], {}), "('max_limit', 3, 'api')\n", (10085, 10108), False, 'from oslo_config import cfg\n'), ((13332, 13380), 'ironic.tests.unit.objects.utils.get_test_deploy_template', 'obj_utils.get_test_deploy_template', (['self.context'], {}), '(self.context)\n', (13366, 13380), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((13860, 13930), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {'name': '"""CUSTOM_DT1"""'}), "(self.context, name='CUSTOM_DT1')\n", (13897, 13930), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((19085, 19110), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (19108, 19110), False, 'from oslo_utils import uuidutils\n'), ((20117, 20179), 'ironic.common.exception.DeployTemplateAlreadyExists', 'exception.DeployTemplateAlreadyExists', ([], {'uuid': 'self.template.uuid'}), '(uuid=self.template.uuid)\n', (20154, 20179), False, 'from ironic.common import exception\n'), ((29266, 29312), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (29310, 29312), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((29333, 29368), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', 
'(1)', '(0)', '(0)'], {}), '(2000, 1, 1, 0, 0)\n', (29350, 29368), False, 'import datetime\n'), ((30768, 30814), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (30812, 30814), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((31922, 31968), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (31966, 31968), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((32698, 32744), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (32742, 32744), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((34089, 34144), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {'name': 'name'}), '(name=name)\n', (34133, 34144), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((34277, 34332), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {'name': 'name'}), '(name=name)\n', (34321, 34332), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((34472, 34527), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {'name': 'name'}), '(name=name)\n', (34516, 34527), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((34711, 34766), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {'name': 'name'}), '(name=name)\n', (34755, 34766), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((35192, 35249), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {'steps': 'steps'}), '(steps=steps)\n', (35236, 35249), True, 'from 
ironic.tests.unit.api import utils as test_api_utils\n'), ((35392, 35438), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (35436, 35438), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((35823, 35869), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (35867, 35869), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((36540, 36586), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (36584, 36586), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((37674, 37720), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (37718, 37720), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((39338, 39384), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (39382, 39384), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((39528, 39574), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (39572, 39574), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((39876, 39927), 'ironic.tests.unit.objects.utils.create_test_deploy_template', 'obj_utils.create_test_deploy_template', (['self.context'], {}), '(self.context)\n', (39913, 39927), True, 'from ironic.tests.unit.objects import utils as obj_utils\n'), ((1665, 1685), 'ironic.api.controllers.v1.max_version', 'api_v1.max_version', ([], {}), '()\n', (1683, 1685), True, 'from ironic.api.controllers import v1 as api_v1\n'), ((31097, 31198), 'unittest.mock.patch.object', 'mock.patch.object', (['self.dbapi', '"""create_deploy_template"""'], {'wraps': 
'self.dbapi.create_deploy_template'}), "(self.dbapi, 'create_deploy_template', wraps=self.dbapi.\n create_deploy_template)\n", (31114, 31198), False, 'from unittest import mock\n'), ((31263, 31309), 'ironic.tests.unit.api.utils.post_get_test_deploy_template', 'test_api_utils.post_get_test_deploy_template', ([], {}), '()\n', (31307, 31309), True, 'from ironic.tests.unit.api import utils as test_api_utils\n'), ((32269, 32307), 'oslo_utils.uuidutils.is_uuid_like', 'uuidutils.is_uuid_like', (["result['uuid']"], {}), "(result['uuid'])\n", (32291, 32307), False, 'from oslo_utils import uuidutils\n'), ((15381, 15496), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""update"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.START'], {}), "(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.START)\n", (15390, 15496), False, 'from unittest import mock\n'), ((15608, 15721), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""update"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.END'], {}), "(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.END)\n", (15617, 15721), False, 'from unittest import mock\n'), ((20001, 20026), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (20024, 20026), False, 'from oslo_utils import uuidutils\n'), ((20830, 20945), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""update"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.START'], {}), "(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.START)\n", (20839, 20945), False, 'from unittest import mock\n'), ((21057, 21173), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""update"""', 'obj_fields.NotificationLevel.ERROR', 'obj_fields.NotificationStatus.ERROR'], {}), "(mock.ANY, mock.ANY, 'update', 
obj_fields.NotificationLevel.ERROR,\n obj_fields.NotificationStatus.ERROR)\n", (21066, 21173), False, 'from unittest import mock\n'), ((24918, 24943), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (24941, 24943), False, 'from oslo_utils import uuidutils\n'), ((26064, 26089), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (26087, 26089), False, 'from oslo_utils import uuidutils\n'), ((29854, 29899), 'oslo_utils.timeutils.parse_isotime', 'timeutils.parse_isotime', (["result['created_at']"], {}), "(result['created_at'])\n", (29877, 29899), False, 'from oslo_utils import timeutils\n'), ((30208, 30244), 'urllib.parse.urlparse', 'urlparse.urlparse', (['response.location'], {}), '(response.location)\n', (30225, 30244), True, 'from urllib import parse as urlparse\n'), ((30289, 30404), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""create"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.START'], {}), "(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.START)\n", (30298, 30404), False, 'from unittest import mock\n'), ((30516, 30629), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""create"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.END'], {}), "(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.END)\n", (30525, 30629), False, 'from unittest import mock\n'), ((32899, 33014), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""create"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.START'], {}), "(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.START)\n", (32908, 33014), False, 'from unittest import mock\n'), ((33126, 33242), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""create"""', 'obj_fields.NotificationLevel.ERROR', 
'obj_fields.NotificationStatus.ERROR'], {}), "(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.ERROR,\n obj_fields.NotificationStatus.ERROR)\n", (33135, 33242), False, 'from unittest import mock\n'), ((40298, 40413), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""delete"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.START'], {}), "(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.START)\n", (40307, 40413), False, 'from unittest import mock\n'), ((40525, 40638), 'unittest.mock.call', 'mock.call', (['mock.ANY', 'mock.ANY', '"""delete"""', 'obj_fields.NotificationLevel.INFO', 'obj_fields.NotificationStatus.END'], {}), "(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.INFO,\n obj_fields.NotificationStatus.END)\n", (40534, 40638), False, 'from unittest import mock\n'), ((4450, 4475), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (4473, 4475), False, 'from oslo_utils import uuidutils\n'), ((7847, 7872), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (7870, 7872), False, 'from oslo_utils import uuidutils\n'), ((9006, 9031), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (9029, 9031), False, 'from oslo_utils import uuidutils\n'), ((9651, 9676), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (9674, 9676), False, 'from oslo_utils import uuidutils\n'), ((10302, 10327), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (10325, 10327), False, 'from oslo_utils import uuidutils\n'), ((11008, 11033), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (11031, 11033), False, 'from oslo_utils import uuidutils\n'), ((11587, 11612), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (11610, 11612), False, 'from oslo_utils import 
uuidutils\n'), ((12701, 12726), 'oslo_utils.uuidutils.generate_uuid', 'uuidutils.generate_uuid', ([], {}), '()\n', (12724, 12726), False, 'from oslo_utils import uuidutils\n')] |
import pandas as pd
import glob
import os
import yaml
import sys
def namesToReads(reference_dir, names_to_reads, salmon_dir):
    """Aggregate per-transcript read counts from a directory of Salmon runs.

    Scans ``salmon_dir`` for folders matching ``*quant*``, reads each folder's
    ``quant.sf`` table, sums ``NumReads`` per transcript name across runs, and
    records which sample (the last ``_``-separated token of the folder name)
    contributed to each transcript.  The aggregate table is written to
    ``reference_dir`` as tab-separated text.

    Args:
        reference_dir: directory in which the output table is written.
        names_to_reads: desired output file name; used as-is when it contains
            ".csv", otherwise the output falls back to "namestoreads.csv".
        salmon_dir: directory containing the per-sample Salmon quant folders.

    Returns:
        Path of the file the aggregate table was written to.

    Note:
        If ``reference_dir/names_to_reads`` already exists, the function
        prints a notice and calls ``sys.exit(0)`` (process-terminating,
        preserved from the original behavior).
    """
    if os.path.isfile(os.path.join(reference_dir, names_to_reads)):
        print("Salmon reads file previously created; new file will not be created from Salmon directory.")
        sys.exit(0)
    folder_names = glob.glob(os.path.join(salmon_dir, '*quant*'))
    files_salmon = [os.path.join(curr, "quant.sf") for curr in folder_names]
    transcript_reads = dict()
    transcript_samples = dict()
    for folder, quant_file in zip(folder_names, files_salmon):
        curr_salmon = pd.read_csv(quant_file, sep="\t")
        # Sample id is the last underscore-separated token of the folder name.
        sample_name = folder.split("_")[-1]
        for name_curr, read_curr in zip(curr_salmon["Name"], curr_salmon["NumReads"]):
            read_curr = float(read_curr)
            if name_curr in transcript_reads:
                transcript_reads[name_curr] += read_curr
                transcript_samples[name_curr].append(sample_name)
            else:
                transcript_reads[name_curr] = read_curr
                transcript_samples[name_curr] = [sample_name]
    # Keep the result DataFrame in its own variable: the original code rebound
    # the `names_to_reads` parameter here, which made the ".csv" check below
    # inspect DataFrame columns (always False) instead of the requested name.
    reads_df = pd.DataFrame({"TranscriptNames": list(transcript_reads.keys()),
                            "NumReads": list(transcript_reads.values()),
                            "SampleName": list(transcript_samples.values())})
    if ".csv" in names_to_reads:
        out_path = os.path.join(reference_dir, names_to_reads)
    else:
        out_path = os.path.join(reference_dir, "namestoreads.csv")
    reads_df.to_csv(path_or_buf=out_path, sep="\t")
    return out_path
"pandas.read_csv",
"os.path.join",
"sys.exit"
] | [((150, 193), 'os.path.join', 'os.path.join', (['reference_dir', 'names_to_reads'], {}), '(reference_dir, names_to_reads)\n', (162, 193), False, 'import os\n'), ((310, 321), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (318, 321), False, 'import sys\n'), ((352, 387), 'os.path.join', 'os.path.join', (['salmon_dir', '"""*quant*"""'], {}), "(salmon_dir, '*quant*')\n", (364, 387), False, 'import os\n'), ((408, 438), 'os.path.join', 'os.path.join', (['curr', '"""quant.sf"""'], {}), "(curr, 'quant.sf')\n", (420, 438), False, 'import os\n'), ((599, 644), 'pandas.read_csv', 'pd.read_csv', (['files_salmon[curr_ind]'], {'sep': '"""\t"""'}), "(files_salmon[curr_ind], sep='\\t')\n", (610, 644), True, 'import pandas as pd\n'), ((1761, 1808), 'os.path.join', 'os.path.join', (['reference_dir', '"""namestoreads.csv"""'], {}), "(reference_dir, 'namestoreads.csv')\n", (1773, 1808), False, 'import os\n'), ((1566, 1609), 'os.path.join', 'os.path.join', (['reference_dir', 'names_to_reads'], {}), '(reference_dir, names_to_reads)\n', (1578, 1609), False, 'import os\n'), ((1676, 1723), 'os.path.join', 'os.path.join', (['reference_dir', '"""namestoreads.csv"""'], {}), "(reference_dir, 'namestoreads.csv')\n", (1688, 1723), False, 'import os\n')] |
import time
import pandas as pd
import numpy as np
# Mapping from user-selectable city name (lower-case) to its CSV data file.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }
# Months covered by the datasets (January-June only); list index + 1 is the
# month number used in the 'month' column built by load_data().
month_list = ['January', 'February', 'March', 'April', 'May', 'June']
# Canonical weekday names used to validate the user's day filter.
day_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
def get_filters():
    """
    Interactively collect the city, month, and day filters from the user.

    Every prompt re-asks until the answer is valid; the month and day filters
    are optional and default to "all" when the user declines them.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    valid_cities = list(CITY_DATA.keys())

    # City: loop until one of the three supported cities is named.
    while True:
        city = input('Would you like to investigate Chicago, Washington, or New York City? ').lower()
        if city in valid_cities:
            break
        print('That\'s not a valid city for this program.')

    # Month filter: yes/no gate first, then an optional month name.
    while True:
        month_filter = input('Would you like to filter data by month? Enter yes or no: ').lower()
        if month_filter in ('yes', 'no'):
            break
        print('Not a valid input. Please specify yes or no')
    month = 'all'
    if month_filter == 'yes':
        while True:
            month = input('Filter by which month? This program has data for January through June: ').title()
            if month in month_list:
                break
            print('Not a valid month for this program.')

    # Day filter: same yes/no gate, then an optional weekday name.
    while True:
        day_filter = input('Would you like to filter data by day? Enter yes or no: ').lower()
        if day_filter in ('yes', 'no'):
            break
        print('Please specify yes or no.')
    day = 'all'
    if day_filter == 'yes':
        while True:
            day = input('Filter by which day? ').title()
            if day in day_list:
                break
            print('Not a valid day.')

    print('-' * 40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    df = pd.read_csv(CITY_DATA[city])
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    # Derived columns consumed later by the time-based statistics.
    df['month'] = df['Start Time'].dt.month
    # NOTE: Series.dt.weekday_name was removed in pandas 1.0; dt.day_name()
    # returns the same full weekday strings (e.g. 'Monday') and works on both
    # old and current pandas versions.
    df['day_of_week'] = df['Start Time'].dt.day_name()
    df['hour'] = df['Start Time'].dt.hour
    if month != 'all':
        # month_list holds January-June, so index + 1 is the month number.
        month = month_list.index(month) + 1
        df = df[df['month'] == month]
    if day != 'all':
        df = df[df['day_of_week'] == day]
    return df
def condisplay_message(city, month, day):
"""
Prints a line with the user's selected filters in a string message.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
# This block prints a string with the user's selected filters. It's not completely necessary,
# but is useful as an easily read reminder of the filters used, located at the top of the program's output.
if month == 'all' and day == 'all':
print('For all months and all days in {}:\n'.format(city.title()))
elif month == 'all' and day != 'all':
print('For all months and all {}s in {}:\n'.format(day, city.title()))
elif month != 'all' and day == 'all':
print('For all days in the month of {} in {}:\n'.format(month, city.title()))
else:
print('For all {}s in the month of {} in {}:\n'.format(day, month, city.title()))
def time_stats(df, month, day):
    """
    Report the most frequent month, day, and start hour for the filtered data.

    The month (resp. day) statistic is skipped when the user already
    filtered on it, since it would be trivially the filter value.

    Args:
        (DataFrame) df - pre-filtered data with 'month', 'day_of_week', 'hour' columns
        (str) month - month filter in effect, or "all"
        (str) day - day filter in effect, or "all"
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    t0 = time.time()

    if month == 'all':
        month_counts = df['month'].value_counts()
        # value_counts keys are month numbers (1-6); translate back to a name.
        best_month = month_list[month_counts.keys()[0] - 1]
        print('The most popular month was {}, with {} bikerides.'.format(best_month, month_counts.iloc[0]))

    if day == 'all':
        day_counts = df['day_of_week'].value_counts()
        print('The most popular day was {}, with {} bikerides.'.format(day_counts.keys()[0], day_counts.iloc[0]))

    # The hour statistic is reported for every combination of filters.
    hour_counts = df['hour'].value_counts()
    print('The most popular hour was {} o\'clock with {} bikerides.'.format(hour_counts.keys()[0], hour_counts.iloc[0]))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def station_stats(df):
    """
    Report the most popular start station, end station, and full trip.

    Note: adds a 'Trip' column ("<start> to <end>") to the caller's DataFrame.

    Args:
        (DataFrame) df - pre-filtered data with 'Start Station' and 'End Station' columns
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    t0 = time.time()

    # Combined start-to-end description, used to rank whole trips.
    df['Trip'] = df['Start Station'].str.cat(df['End Station'], sep=' to ')

    reports = (
        ('Start Station', 'The most popular start station was {} with {} trips starting there.'),
        ('End Station', 'The most popular ending station was {} with {} trips ending there.'),
        ('Trip', 'The most popular trip was {} with {} trips.'),
    )
    for column, template in reports:
        counts = df[column].value_counts()
        print(template.format(counts.keys()[0], counts.iloc[0]))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def trip_time_units(trip_time_type, trip_time):
    """
    Print a trip duration using a readable unit chosen from its magnitude.

    The unit steps up once the duration exceeds two of the next-larger unit
    (e.g. anything over 120 seconds is reported in minutes), and values are
    rounded to two decimal places.

    Args:
        (str) trip_time_type - label for the printed line, e.g. 'total' or 'mean'
        (int) trip_time - trip time in seconds
    """
    # (upper bound in seconds, divisor, unit label) — checked smallest first.
    # The trailing period on 'seconds.' matches the original message exactly.
    scales = (
        (120, 1, 'seconds.'),
        (120 * 60, 60, 'minutes'),
        (48 * 60 * 60, 60 * 60, 'hours'),
        (60 * 24 * 60 * 60, 60 * 60 * 24, 'days'),
        (48 * 30 * 24 * 60 * 60, 60 * 60 * 24 * 30, 'months'),
    )
    for upper_bound, divisor, unit in scales:
        if trip_time <= upper_bound:
            print('The {} travel duration was {:.2f} {}'.format(trip_time_type, trip_time / divisor, unit))
            return
    print('The {} travel duration was {:.2f} years'.format(trip_time_type, trip_time / (60 * 60 * 24 * 365)))
def trip_duration_stats(df):
    """
    Report the total and mean trip duration for the filtered data.

    Args:
        (DataFrame) df - pre-filtered data with a 'Trip Duration' column (seconds)
    """
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()

    # Unit selection (seconds/minutes/hours/...) is delegated to
    # trip_time_units; total first, then the mean.
    trip_time_units('total', df['Trip Duration'].sum())
    trip_time_units('mean', df['Trip Duration'].mean())

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def user_stats(df, city):
    """
    Report user type, gender, and birth-year breakdowns for the filtered data.

    The Washington dataset lacks 'Gender' and 'Birth Year' columns, so those
    sections degrade to an informational message instead of failing.

    Args:
        (DataFrame) df - pre-filtered bikeshare data
        (str) city - city name, used in the "no data available" messages
    """
    print('\nCalculating User Stats...\n')
    t0 = time.time()

    # User type counts, with missing values shown under an explicit N/A bucket.
    print(df['User Type'].fillna('N/A').value_counts())
    print()

    try:
        print(df['Gender'].fillna('N/A').value_counts())
        print()
    except KeyError:
        # No 'Gender' column in this city's table.
        print('{} has no gender data available.'.format(city.title()))
        print()

    try:
        earliest = int(df['Birth Year'].min())
        latest = int(df['Birth Year'].max())
        most_common = int(df['Birth Year'].mode()[0])
        print('The earliest user birth year is {}. The most recent user birth year is {}. The most common user birth year is {}.'.format(
            earliest, latest, most_common))
    except KeyError:
        # No 'Birth Year' column in this city's table.
        print('{} has no birth year data available.'.format(city.title()))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def display_data(df):
    """
    Interactively page through the raw, filtered data five rows at a time.

    Asks a yes/no question before showing the first five rows, then keeps
    offering the next five rows until the user answers no.

    Args:
        (DataFrame) df - the Pandas DataFrame loaded based on the user's filters.
    """
    # First yes/no gate; re-prompt until the answer is one of yes/no.
    display_input = input('Would you like to see the raw data (5 rows)? Enter yes or no: ').lower()
    while display_input != 'yes' and display_input != 'no':
        display_input = input('Not a valid input. Please enter yes or no: ').lower()
    if display_input == 'yes':
        # i is the index of the first row of the current five-row window.
        i=0
        while True:
            # A slice past the end of the frame simply yields an empty
            # selection rather than raising, so no bounds check is needed.
            print(df.iloc[i:i+5,:])
            display_input = input('Would you like to see 5 more rows? Enter yes or no: ').lower()
            while display_input != 'yes' and display_input != 'no':
                display_input = input('Not a valid input. Please enter yes or no: ').lower()
            if display_input == 'yes':
                i += 5
            else:
                break
def main():
    """Run the interactive analysis loop until the user declines a restart."""
    while True:
        # Gather filters, load the matching data, then run every report.
        city, month, day = get_filters()
        df = load_data(city, month, day)
        condisplay_message(city, month, day)
        time_stats(df, month, day)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df, city)
        display_data(df)
        # Anything other than 'yes' (case-insensitive) ends the program.
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower() != 'yes':
            break
if __name__ == "__main__":
    main()
| [
"pandas.read_csv",
"pandas.to_datetime",
"time.time"
] | [((3531, 3559), 'pandas.read_csv', 'pd.read_csv', (['CITY_DATA[city]'], {}), '(CITY_DATA[city])\n', (3542, 3559), True, 'import pandas as pd\n'), ((3583, 3615), 'pandas.to_datetime', 'pd.to_datetime', (["df['Start Time']"], {}), "(df['Start Time'])\n", (3597, 3615), True, 'import pandas as pd\n'), ((5451, 5462), 'time.time', 'time.time', ([], {}), '()\n', (5460, 5462), False, 'import time\n'), ((6974, 6985), 'time.time', 'time.time', ([], {}), '()\n', (6983, 6985), False, 'import time\n'), ((10332, 10343), 'time.time', 'time.time', ([], {}), '()\n', (10341, 10343), False, 'import time\n'), ((11395, 11406), 'time.time', 'time.time', ([], {}), '()\n', (11404, 11406), False, 'import time\n'), ((6674, 6685), 'time.time', 'time.time', ([], {}), '()\n', (6683, 6685), False, 'import time\n'), ((8380, 8391), 'time.time', 'time.time', ([], {}), '()\n', (8389, 8391), False, 'import time\n'), ((11083, 11094), 'time.time', 'time.time', ([], {}), '()\n', (11092, 11094), False, 'import time\n'), ((12940, 12951), 'time.time', 'time.time', ([], {}), '()\n', (12949, 12951), False, 'import time\n')] |
End of preview. Expand in Data Studio.
README.md exists but content is empty.
- Downloads last month
- 5