[ "than the input number \"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\" +", "user for a number. - In one line, print a new list that", "- In one line, print a new list that contains all elements from", "= [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] main(testList)", "all elements from the original list that are less than the input number", "\")) print(\"The given list is:\", someList) print(\"The new list is: \", list(val for", "input number \"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All values", "someList if val < chosenVal)) if __name__ == \"__main__\": testList = [1, 1,", "GIVEN: A list of numbers - Ask user for a number. - In", "this number will be filtered out.\\n\" + \"--> \")) print(\"The given list is:\",", "\"--> \")) print(\"The given list is:\", someList) print(\"The new list is: \", list(val", "\"__main__\": testList = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55,", "In one line, print a new list that contains all elements from the", "elements from the original list that are less than the input number \"\"\"", "python3 \"\"\"Example 3 from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers - Ask", "list is:\", someList) print(\"The new list is: \", list(val for val in someList", "print a new list that contains all elements from the original list that", "a number.\\n\" + \"All values in a given list larger than or equal", "from the original list that are less than the input number \"\"\" def", "from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers - Ask user for a", "if val < chosenVal)) if __name__ == \"__main__\": testList = [1, 1, 2,", "someList) print(\"The new list is: \", list(val for val in someList if val", "https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers - Ask user for a number.", "a new list that contains all elements from the original list that are", "main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All values in a given list", "one line, print a new list that contains all elements from the original", "val in someList if val < chosenVal)) if __name__ == \"__main__\": testList =", "\", list(val for val in someList if val < chosenVal)) if __name__ ==", "a given list larger than or equal to this number will be filtered", "new list is: \", list(val for val in someList if val < chosenVal))", "number will be filtered out.\\n\" + \"--> \")) print(\"The given list is:\", someList)", "less than the input number \"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\"", "to this number will be filtered out.\\n\" + \"--> \")) print(\"The given list", "list that contains all elements from the original list that are less than", "out.\\n\" + \"--> \")) print(\"The given list is:\", someList) print(\"The new list is:", "number \"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All values in", "the original list that are less than the input number \"\"\" def main(someList):", "val < chosenVal)) if __name__ == \"__main__\": testList = [1, 1, 2, 3,", "given list larger than or equal to this number will be filtered out.\\n\"", "be filtered out.\\n\" + \"--> \")) print(\"The given list is:\", someList) print(\"The new", "that contains all elements from the original list that are less than the", "is:\", someList) print(\"The new list is: \", list(val for 
val in someList if", "print(\"The new list is: \", list(val for val in someList if val <", "list that are less than the input number \"\"\" def main(someList): chosenVal =", "#!usr/bin/env python3 \"\"\"Example 3 from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers -", "testList = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]", "equal to this number will be filtered out.\\n\" + \"--> \")) print(\"The given", "a number. - In one line, print a new list that contains all", "list larger than or equal to this number will be filtered out.\\n\" +", "than or equal to this number will be filtered out.\\n\" + \"--> \"))", "if __name__ == \"__main__\": testList = [1, 1, 2, 3, 5, 8, 13,", "3 from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers - Ask user for", "are less than the input number \"\"\" def main(someList): chosenVal = int(input(\"Enter a", "int(input(\"Enter a number.\\n\" + \"All values in a given list larger than or", "== \"__main__\": testList = [1, 1, 2, 3, 5, 8, 13, 21, 34,", "def main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All values in a given", "will be filtered out.\\n\" + \"--> \")) print(\"The given list is:\", someList) print(\"The", "\"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All values in a", "\"\"\"Example 3 from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html ========================== GIVEN: A list of numbers - Ask user", "A list of numbers - Ask user for a number. - In one", "\"All values in a given list larger than or equal to this number", "__name__ == \"__main__\": testList = [1, 1, 2, 3, 5, 8, 13, 21,", "for val in someList if val < chosenVal)) if __name__ == \"__main__\": testList", "or equal to this number will be filtered out.\\n\" + \"--> \")) print(\"The", "= int(input(\"Enter a number.\\n\" + \"All values in a given list larger than", "+ \"--> \")) print(\"The given list is:\", someList) print(\"The new list is: \",", "original list that are less than the input number \"\"\" def main(someList): chosenVal", "line, print a new list that contains all elements from the original list", "chosenVal = int(input(\"Enter a number.\\n\" + \"All values in a given list larger", "new list that contains all elements from the original list that are less", "for a number. - In one line, print a new list that contains", "values in a given list larger than or equal to this number will", "+ \"All values in a given list larger than or equal to this", "number. - In one line, print a new list that contains all elements", "filtered out.\\n\" + \"--> \")) print(\"The given list is:\", someList) print(\"The new list", "========================== GIVEN: A list of numbers - Ask user for a number. -", "list of numbers - Ask user for a number. - In one line,", "list(val for val in someList if val < chosenVal)) if __name__ == \"__main__\":", "in a given list larger than or equal to this number will be", "in someList if val < chosenVal)) if __name__ == \"__main__\": testList = [1,", "the input number \"\"\" def main(someList): chosenVal = int(input(\"Enter a number.\\n\" + \"All", "numbers - Ask user for a number. - In one line, print a", "of numbers - Ask user for a number. 
- In one line, print", "number.\\n\" + \"All values in a given list larger than or equal to", "is: \", list(val for val in someList if val < chosenVal)) if __name__", "< chosenVal)) if __name__ == \"__main__\": testList = [1, 1, 2, 3, 5,", "Ask user for a number. - In one line, print a new list", "print(\"The given list is:\", someList) print(\"The new list is: \", list(val for val", "given list is:\", someList) print(\"The new list is: \", list(val for val in", "list is: \", list(val for val in someList if val < chosenVal)) if", "contains all elements from the original list that are less than the input", "- Ask user for a number. - In one line, print a new", "chosenVal)) if __name__ == \"__main__\": testList = [1, 1, 2, 3, 5, 8,", "that are less than the input number \"\"\" def main(someList): chosenVal = int(input(\"Enter", "larger than or equal to this number will be filtered out.\\n\" + \"-->" ]
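As a quick non-interactive check of the same filter, a hypothetical helper (not part of the original exercise) can wrap the one-line expression:

def filter_below(values, threshold):
    # Keep only the elements strictly less than threshold, mirroring the one-liner above.
    return [val for val in values if val < threshold]


assert filter_below([1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89], 5) == [1, 1, 2, 3]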
[ "above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index,", "0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90])", "lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r')", "'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500,", "0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ =", "0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 =", "import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__", "color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5,", "plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05,", "0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _", "<reponame>luqin/firefly # -*- encoding:utf-8 -*- \"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from", "= ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80,", "lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25,", "__weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param", "plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾", "0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _", "plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30)", "0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _", "lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618,", "lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800',", "'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250',", "'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382, above950,", "above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 
再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80,", "kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382,", "color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5,", "above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果", "color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2,", "loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618',", "below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382,", "# 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5,", "color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382',", "..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ = 'abu_quant'", "plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250',", "_ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70,", "\"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__ import absolute_import from __future__", "'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.title('between", "中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") #", "_ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382',", "'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500,", "618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict", "ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95]", "# 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\")", "as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布'", "\"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown'", "= 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show:", "kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' #", "'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index,", "ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382,", "\"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close", "_ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _,", "lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382,", "kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382,", "color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30", "金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd,", "计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618,", "'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500',", "namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900',", "ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90,", "import print_function from __future__ import absolute_import from __future__ import division from collections import", "'阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象", "color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index,", "= plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300',", "lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底", "# 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict =", "__author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param", "中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") #", "'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382',", "_, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if", "黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name =", "plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20,", "0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k')", ":param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'):", "__future__ import print_function from __future__ import absolute_import from __future__ import division from collections", "0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y')", "= ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _", "lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300,", "from __future__ import absolute_import from __future__ import division from collections import namedtuple import", "kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25])", "plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, 
color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5,", "再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents,", "gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) #", "show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80,", "below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382,", "alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y')", "'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('between golden') return", "0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700,", "plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382',", "gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index,", "def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return:", "['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800',", "0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的", "plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800,", ":return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name", "import absolute_import from __future__ import division from collections import namedtuple import matplotlib.pyplot as", "from collections import namedtuple import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from", "not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618", "plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900',", "'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382,", "开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5,", "import namedtuple import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import", "print_function from __future__ import absolute_import from __future__ import division from collections import namedtuple", "hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 =", "kl_close) # below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) 
below382, above382", "calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象", "'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382,", "= ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700", "= ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close)", "= ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618)", "# 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\")", "below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70])", "'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300',", "# -*- encoding:utf-8 -*- \"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__", "只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close =", "above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250,", "plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g')", "above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382)", "-*- encoding:utf-8 -*- \"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__ import", "namedtuple import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show", ":param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if", "_, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95,", "0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900,", "above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents =", "show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\"", "color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618,", "division from collections import namedtuple import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute", "ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 
再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25,", "above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show:", "gex_500, gd_618, gex_618, above618, below618, above382, below382, above950, above900, above800, above700, below300, below250,", "matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ =", "= '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd:", "above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950", "'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500',", "plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618',", "alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') plt.fill_between(kl_pd.index, above382, below382,", "'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])(", "计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 =", "'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.title('between golden')", "'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382,", "gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict", "below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的", "ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above", "= 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382,", "ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ =", "below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents", "above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95])", "rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'],", "collections import namedtuple import matplotlib.pyplot as plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil", "__future__ import division from collections import namedtuple import matplotlib.pyplot as plt from ..TLineBu", "= ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800", "0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0,", "_ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _,", "color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1,", "plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c')", "0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _,", "-*- \"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__ import absolute_import from", "bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618',", "pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250,", "if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500,", "gd_618, gex_618, above618, below618, above382, below382, above950, above900, above800, above700, below300, below250, below200)", "# 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close)", "..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\"", "1), loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618',", "'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2,", "below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) #", "'above950', 'above900', 'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1),", "gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close)", "import plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割", "plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减", "ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 =", "plt from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__", "# 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90, 0.95,lw线条粗度递减 plt.axhline(above950,", "_, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _,", "# below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 =", "plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618,", "plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化", "plt.fill_between(kl_pd.index, above382, below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k')", "'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700',", "'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618,", "below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70,", "= 
ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025,", "gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382, above950, above900, above800, above700, below300,", "from __future__ import division from collections import namedtuple import matplotlib.pyplot as plt from", "382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) #", "数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close", "golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382',", "ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95])", "__future__ import absolute_import from __future__ import division from collections import namedtuple import matplotlib.pyplot", "是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown", "lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382,", "gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618, above618 =", "show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not hasattr(kl_pd, 'name'): #", "'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382,", "0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25,", "= ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 =", "'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382, above950, above900,", "plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') #", "ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd,", "gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382, above950, above900, above800,", "0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m')", "gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30,", "from __future__ import print_function from __future__ import absolute_import from __future__ import division from", "color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618,", "color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g') plt.axhline(below382, lw=2, color='g') 
plt.fill_between(kl_pd.index, above382, below382, alpha=0.1,", "import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def", "金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) #", "0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200,", "ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025,", "plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2,", "= ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with", "'above800', 'above700', 'above618', 'below618', 'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)", "kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果 :return: 黄金分割及比例分割结果组成的namedtuple数值对象 \"\"\" kl_close = kl_pd.close if not", "'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618,", "= ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90,", "0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20])", "= ABuTLExecute.below_above_gen(gd_618, gex_618) below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382) # 再次通过比例序列percents和find_percent_point寻找对应比例的位置字典pts_dict percents = [0.20,", "below382, alpha=0.1, color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0,", "from ..TLineBu import ABuTLExecute from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ =", "# 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618", "kl_pd.name = 'unknown' # 计算视觉黄金分割 gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割", "color=\"g\") # 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200,", "\"\"\" from __future__ import print_function from __future__ import absolute_import from __future__ import division", "lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name,", "import division from collections import namedtuple import matplotlib.pyplot as plt from ..TLineBu import", "'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618,", "'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618,", "gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618, above618", "0.90, 0.95,lw线条粗度递减 plt.axhline(above950, lw=3.5, color='c') plt.axhline(above900, lw=3.0, color='y') plt.axhline(above800, 
lw=2.5, color='k') plt.axhline(above700, lw=2.5,", "'abu_quant' def calc_golden(kl_pd, show=True): \"\"\" 只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割 数值结果分析以及可视化 :param kl_pd: 金融时间序列,pd.DataFrame对象 :param show: 是否可视化黄金分割及比例分割结果", "lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(),", "'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden',", "ABuTLExecute.below_above_gen(*pts_dict[0.30]) # 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 =", "borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618',", "percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close)", "lw=3.0, color='y') plt.axhline(above800, lw=2.5, color='k') plt.axhline(above700, lw=2.5, color='m') # 中间层的618是带,有上下底 plt.axhline(above618, lw=2, color='r')", "plt.axhline(above618, lw=2, color='r') plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底", "0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c')", "lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618',", "黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__ import absolute_import from __future__ import", "encoding:utf-8 -*- \"\"\" 黄金分割及比例分割示例模块 \"\"\" from __future__ import print_function from __future__ import absolute_import", "# 0.20, 0.25, 0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5,", "gex_382, gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382, above950, above900, above800, above700,", "= ABuTLExecute.find_golden_point(kl_pd.index, kl_close) # 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below", "plt.axhline(below618, lw=1.5, color='r') plt.fill_between(kl_pd.index, above618, below618, alpha=0.1, color=\"r\") # 中间层的382是带,有上下底 plt.axhline(above382, lw=1.5, color='g')", "color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700',", "gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底 below618, above618 = ABuTLExecute.below_above_gen(gd_618,", "from ..UtilBu.ABuDTUtil import plt_show __author__ = '阿布' __weixin__ = 'abu_quant' def calc_golden(kl_pd, show=True):", "absolute_import from __future__ import division from collections import namedtuple import matplotlib.pyplot as plt", "0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30])", "# 0.70, 0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80])", "_, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = 
ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): #", "'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('between golden') return namedtuple('golden', ['g382', 'gex382',", "0.80, 0.90, 0.95只找最高的,即顶部只要最高的 _, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70]) _, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900", "ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) #", "color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ = plt.setp(plt.gca().get_xticklabels(), rotation=30) plt.legend([kl_pd.name, 'above950',", "[0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20,", "# 计算统计黄金分割 gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close) # below above 382, 618确定,即382,618上下底", "= [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) #", "ABuTLExecute.below_above_gen(*pts_dict[0.80]) _, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90]) _, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95]) if show: with plt_show():", "# 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20]) below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25]) below300,", "return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618', 'gex618', 'above618', 'below618', 'above382', 'below382', 'above950',", "0.30 lw线条粗度递曾 plt.axhline(below300, lw=2.5, color='k') plt.axhline(below250, lw=3.0, color='y') plt.axhline(below200, lw=3.5, color='c') _ =", "= kl_pd.close if not hasattr(kl_pd, 'name'): # 金融时间序列中如果有异常的没有name信息的补上一个unknown kl_pd.name = 'unknown' # 计算视觉黄金分割", "if show: with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70,", "with plt_show(): # 开始可视化黄金分割及比例分割结果 plt.axes([0.025, 0.025, 0.95, 0.95]) plt.plot(kl_close) # 0.70, 0.80, 0.90,", "0.90, 0.95] pts_dict = ABuTLExecute.find_percent_point(percents, kl_close) # 0.20, 0.25, 0.30只找最低的,即底部只要最低的 below200, _ =", "'below618', 'above382', 'below382', 'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])( gd_382, gex_382, gd_500," ]
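A minimal usage sketch for calc_golden, assuming abupy's ABuTLExecute and plt_show helpers are importable so the module above loads; the DataFrame below is a hypothetical stand-in that only provides the close series the function reads:

import numpy as np
import pandas as pd

# Hypothetical stand-in for a real kline DataFrame: a random-walk close series.
kl_pd = pd.DataFrame({'close': 100 + np.random.random(252).cumsum()})

golden = calc_golden(kl_pd, show=False)
# The returned namedtuple exposes the band values by name.
print(golden.g618, golden.below618, golden.above618)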
[ "= {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def", "if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite = unittest.TestSuite()", "VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def", "Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program", "== 'win32': Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT", "with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main() if", "'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters],", "via file (path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown = parser.parse_known_args()", "!= None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform ==", "import sys,os import subprocess import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT =", "action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output", "data via file (path)') args, unknown = parser.parse_known_args() args = vars(args) if args['description']", "global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test'", "sys,os import subprocess import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT = {}", "(path)') args, unknown = parser.parse_known_args() args = vars(args) if args['description'] == True: print(Main.__doc__)", "shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No describe'''", "tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output data via file", "suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT", "OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT", "time import json import sys,os import subprocess import argparse import unittest VALUES_INPUT =", "unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param", "def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True)", "self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command,", "#Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito 
www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser =", "== True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if", "runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT =", "TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters", "= VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ ==", "unknown = parser.parse_known_args() args = vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if", "parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown = parser.parse_known_args() args = vars(args)", "= parser.parse_known_args() args = vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests']", "file (path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown = parser.parse_known_args() args", "VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO')", "def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc')", "import unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(),", "path) def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows')", "print(Main.__doc__) sys.exit() if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner =", "parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path)", "runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param =", "with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True, indent=2)", "{} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command,", "= 'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program',", "VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\")", "import subprocess import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class", "parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if", "if sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT global", 
"Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description',", "parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args,", "shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def", "import time import json import sys,os import subprocess import argparse import unittest VALUES_INPUT", "parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via", "args = vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] == True:", "self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command,", "= unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input'])", "param = ' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string", "import json import sys,os import subprocess import argparse import unittest VALUES_INPUT = {}", "VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__':", "True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner", "unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as", "Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda o:", "' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT,", "unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO')", "test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None):", "== '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests',", "= argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data", "= {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def 
test_case_001(self):", "def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No describe''' global", "def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def", "VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__", "help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown", "__name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute", "Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path):", "subprocess import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase):", "parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output data", "entry via file (path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown =", "help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output', help='output data via", "as json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']: with", "file (path)') args, unknown = parser.parse_known_args() args = vars(args) if args['description'] == True:", "sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param = '", "subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe',", "= vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite", "if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform", "class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None):", "parser.parse_known_args() args = vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] ==", "vars(args) if args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite =", "subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No", "#OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser", "args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit()", "args, unknown = parser.parse_known_args() args = vars(args) if 
args['description'] == True: print(Main.__doc__) sys.exit()", "VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if", "else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32': Run('explorer.exe', path) def Main():", "(path)') parser.add_argument('-o','--file_output', help='output data via file (path)') args, unknown = parser.parse_known_args() args =", "if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite)", "args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main()", "open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True, indent=2) outfile.write(json_string)", "#Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__)", "json import sys,os import subprocess import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT", "suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with", "= json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as", "= unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file)", "sys.exit() if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner()", "'__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true')", "via file (path)') args, unknown = parser.parse_known_args() args = vars(args) if args['description'] ==", "'.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda", "args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True,", "if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests',", "www.google.com.br') #VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description", "global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest']", "action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') 
parser.add_argument('-o','--file_output', help='output data via file (path)')", "import argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def", "VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br') #VALUES_OUTPUT['vartest'] =", "argparse import unittest VALUES_INPUT = {} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self):", "'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true')", "'win32': Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT =", "#VALUES_OUTPUT['vartest'] = 'test' if __name__ == '__main__': parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of", "open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']:", "parser = argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input',", "json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile:", "of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)')", "{} VALUES_OUTPUT = {} class TestCases(unittest.TestCase): def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(),", "'''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito", "if args['file_input']: with open(args['file_input']) as json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown)", "== True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\"))", "help='output data via file (path)') args, unknown = parser.parse_known_args() args = vars(args) if", "test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else:", "describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT #OpenFolder(r'C:\\Windows') #Run(r'Calc') #Run(r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe','-incognito www.google.com.br')", "Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT VALUES_OUTPUT = VALUES_INPUT", "suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']: with open(args['file_input']) as json_file:", "if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__,", "True: suite = unittest.TestSuite() 
suite.addTest(TestCases(\"test_case_000\")) suite.addTest(TestCases(\"test_case_001\")) runner = unittest.TextTestRunner() runner.run(suite) sys.exit() if args['file_input']:", "sys.platform == 'win32': Run('explorer.exe', path) def Main(): '''No describe''' global VALUES_INPUT global VALUES_OUTPUT", "None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True) def OpenFolder(path): if sys.platform == 'win32':", "program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file (path)') parser.add_argument('-o','--file_output',", "help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry via file", "'FOO') def Run(command, parameters=None): if(parameters != None): subprocess.Popen([command, parameters], shell=True) else: subprocess.Popen(command, shell=True)", "argparse.ArgumentParser(description=Main.__doc__) parser.add_argument('-d','--description', help='Description of program', action='store_true') parser.add_argument('-u','--tests', help='Execute tests', action='store_true') parser.add_argument('-i','--file_input', help='data entry", "= ' '.join(unknown) Main() if args['file_output']: with open(args['file_output'], \"w\") as outfile: json_string =", "def test_case_000(self): self.assertEqual('foo'.upper(), 'FOO') def test_case_001(self): self.assertEqual('foo'.upper(), 'FOO') def Run(command, parameters=None): if(parameters !=", "args['description'] == True: print(Main.__doc__) sys.exit() if args['tests'] == True: suite = unittest.TestSuite() suite.addTest(TestCases(\"test_case_000\"))", "json_file: VALUES_INPUT = json.load(json_file) param = ' '.join(unknown) Main() if args['file_output']: with open(args['file_output']," ]
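# ---------------------------------------------------------------------------
# Usage sketch for the argparse/unittest template above. This is an
# illustrative, hedged example and not part of the original script: the file
# names "template.py", "input.json" and "output.json" are assumptions made
# here for the demo. Since Main() simply copies VALUES_INPUT into
# VALUES_OUTPUT, the output file should echo the input back.
# ---------------------------------------------------------------------------
import json
import subprocess
import sys


def demo_template_roundtrip():
    with open("input.json", "w") as f:
        json.dump({"greeting": "hello"}, f)

    # Run the template with JSON input and output files (assumed file name).
    subprocess.run(
        [sys.executable, "template.py", "-i", "input.json", "-o", "output.json"],
        check=True,
    )

    with open("output.json") as f:
        print(json.load(f))  # expected: {'greeting': 'hello'}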
[ "(base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst =", "continue if not is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet: continue sts", "return False return True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return", "\"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH) if __name__ == '__main__':", "base_net = tmplist[3] n = int(tmplist[4]) - 1 msize = 32 - len(bin(n))", "= open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China", "= line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue if", "get_subnet(line) if not subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\")", "tmplist[6] != \"allocated\": return None base_net = tmplist[3] n = int(tmplist[4]) - 1", "32 - len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\")", "rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for line in fdst: line", "if line.find(\"ipv4\") < 6: return False return True def is_cn_ipv4(line): if line.find(\"CN\") <", "- 1 msize = 32 - len(bin(n)) + 2 return \"%s/%s\" % (base_net,", "\"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\")", "is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return True def is_cn_ipv4(line): if", "line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if not", "\"\"\"从apnic获取中国IP范围\"\"\" import urllib.request, os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH", "get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7: return None if tmplist[6] !=", "= \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile", "RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata", "fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"#", "return True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7: return None", "def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\")", "= 32 - len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,) def main():", "!= \"apnic|\": continue if not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet =", "True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7: return None if", "\"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response", "len(tmplist) != 7: return None if tmplist[6] != \"allocated\": return None base_net =", "n = int(tmplist[4]) - 1 msize = 32 - len(bin(n)) + 2 return", "= line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if", "None if tmplist[6] != \"allocated\": return None base_net = tmplist[3] n = int(tmplist[4])", "7: return None if tmplist[6] != \"allocated\": return None base_net = tmplist[3] 
n", "if line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue if not is_cn_ipv4(line): continue", "< 6: return False return True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist)", "tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return True", "!= 7: return None if tmplist[6] != \"allocated\": return None base_net = tmplist[3]", "\"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return True def is_cn_ipv4(line): if line.find(\"CN\")", "def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return True def get_subnet(line): tmplist", "not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet:", "if not subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close()", "= int(tmplist[4]) - 1 msize = 32 - len(bin(n)) + 2 return \"%s/%s\"", "% URL) rfdst.write(\"# China IP address\\n\") for line in fdst: line = line.replace(\"\\r\",", "if line.find(\"CN\") < 6: return False return True def get_subnet(line): tmplist = line.split(\"|\")", "if not is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet: continue sts =", "1 msize = 32 - len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,)", "line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue", "return None if tmplist[6] != \"allocated\": return None base_net = tmplist[3] n =", "response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\")", "line.find(\"ipv4\") < 6: return False return True def is_cn_ipv4(line): if line.find(\"CN\") < 6:", "TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH,", "!= \"allocated\": return None base_net = tmplist[3] n = int(tmplist[4]) - 1 msize", "subnet = get_subnet(line) if not subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts)", "python3 \"\"\"从apnic获取中国IP范围\"\"\" import urllib.request, os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单", "生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL)", "2 return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst =", "in fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6] !=", "def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return True def is_cn_ipv4(line):", "False return True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return True", "#!/usr/bin/env python3 \"\"\"从apnic获取中国IP范围\"\"\" import urllib.request, os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" #", "% (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst", "continue if not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet = get_subnet(line) if", "continue subnet = get_subnet(line) if not subnet: continue sts = \"%s\\n\" % subnet", "urllib.request, os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = 
\"./fdslight_etc/whitelist.txt\"", "\"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read()", "\"apnic|\": continue if not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet = get_subnet(line)", "fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\":", "tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def", "\"\") if line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue if not is_cn_ipv4(line):", "return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH,", "China IP address\\n\") for line in fdst: line = line.replace(\"\\r\", \"\") line =", "\"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\")", "= \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH) if __name__ ==", "msize = 32 - len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,) def", "= open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for line", "not subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close()", "response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return", "rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return", "tmplist[3] n = int(tmplist[4]) - 1 msize = 32 - len(bin(n)) + 2", "for line in fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if", "+ 2 return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst", "line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue", "def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7: return None if tmplist[6]", "= \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata =", "rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for", "True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return True def get_subnet(line):", "if not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet = get_subnet(line) if not", "line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue if not", "< 6: return False return True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return", "6: return False return True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) !=", "line[0:6] != \"apnic|\": continue if not is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet", "import urllib.request, os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH =", "is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet: continue sts = \"%s\\n\" %", "\"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for line in fdst:", "continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH) if", 
"rfdst.write(\"# China IP address\\n\") for line in fdst: line = line.replace(\"\\r\", \"\") line", "sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH) if __name__", "not is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet: continue sts = \"%s\\n\"", "line.find(\"CN\") < 6: return False return True def get_subnet(line): tmplist = line.split(\"|\") if", "is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return True def get_subnet(line): tmplist =", "\"\") line = line.replace(\"\\n\", \"\") if line[0:6] != \"apnic|\": continue if not is_ipv4(line):", "%s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for line in fdst: line =", "= tmplist[3] n = int(tmplist[4]) - 1 msize = 32 - len(bin(n)) +", "return None base_net = tmplist[3] n = int(tmplist[4]) - 1 msize = 32", "False return True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7: return", "% subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH) if __name__ == '__main__': main()", "- len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file()", "= open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line):", "line.split(\"|\") if len(tmplist) != 7: return None if tmplist[6] != \"allocated\": return None", "open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\"", "\"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile =", "return True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False return True def", "is_ipv4(line): continue if not is_cn_ipv4(line): continue subnet = get_subnet(line) if not subnet: continue", "address\\n\") for line in fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\")", "get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close()", "= line.split(\"|\") if len(tmplist) != 7: return None if tmplist[6] != \"allocated\": return", "open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP address\\n\") for line in", "# 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response =", "= get_subnet(line) if not subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse", "\"allocated\": return None base_net = tmplist[3] n = int(tmplist[4]) - 1 msize =", "subnet: continue sts = \"%s\\n\" % subnet rfdst.write(sts) print(\"parse ok\") rfdst.close() fdst.close() os.remove(TMP_PATH)", "= response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False", "main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"#", "get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" %", "os URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def", 
"int(tmplist[4]) - 1 msize = 32 - len(bin(n)) + 2 return \"%s/%s\" %", "tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6: return False return True def", "URL = \"http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest\" TMP_PATH = \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file():", "def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata)", "len(bin(n)) + 2 return \"%s/%s\" % (base_net, msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\")", "open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL) rfdst.write(\"# China IP", "\"wb\") response = urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if", "urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") < 6:", "line in fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\", \"\") if line[0:6]", "return False return True def get_subnet(line): tmplist = line.split(\"|\") if len(tmplist) != 7:", "print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\"", "= urllib.request.urlopen(URL) rdata = response.read() tmpfile.write(rdata) tmpfile.close() def is_ipv4(line): \"\"\"检查是否是IPv4\"\"\" if line.find(\"ipv4\") <", "6: return False return True def is_cn_ipv4(line): if line.find(\"CN\") < 6: return False", "tmplist = line.split(\"|\") if len(tmplist) != 7: return None if tmplist[6] != \"allocated\":", "if tmplist[6] != \"allocated\": return None base_net = tmplist[3] n = int(tmplist[4]) -", "print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH, \"w\") rfdst.write(\"# %s\\n\" % URL)", "IP address\\n\") for line in fdst: line = line.replace(\"\\r\", \"\") line = line.replace(\"\\n\",", "msize,) def main(): print(\"downloading...\") get_remote_file() print(\"parsing...\") fdst = open(TMP_PATH, \"r\") rfdst = open(RESULT_FILE_PATH,", "= \"./whitelist.tmp\" # 生成的最终白名单 RESULT_FILE_PATH = \"./fdslight_etc/whitelist.txt\" def get_remote_file(): tmpfile = open(TMP_PATH, \"wb\")", "URL) rfdst.write(\"# China IP address\\n\") for line in fdst: line = line.replace(\"\\r\", \"\")", "None base_net = tmplist[3] n = int(tmplist[4]) - 1 msize = 32 -", "if len(tmplist) != 7: return None if tmplist[6] != \"allocated\": return None base_net" ]
[ "#---------------------------------------------------------- def BuildBST(A, left, right) : if(left > right): return None newNode =", "count = 0 def kthSmallestInBST(root, k): global count if (not root): return None", "if a binary tree is a binary search tree def IsBST3(root): if root", "3, 4, 5, 6, 7] root = BuildBST(A, 0, len(A) - 1) print", "root): return None left = kthSmallestInBST(root.left, k) if (left): return left count +=", "root): if root == None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right)", "5, 6, 7] root = BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\"", "return root.val if leftSum > 0 and rightSum > 0: self.maxValue = max(self.maxValue,", "and FindMax(root.getLeft()) > root.get_data()) return 0 # false if the min of the", "if root == None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if", "+= 1 if (count == k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def", "= None newNode.right = None else : mid = left + (right -", "left + (right - left) / 2 newNode.data = A[mid] newNode.left = BuildBST(A,", "def SortedListToBST(ll, start, end): if(start > end): return None # same as (start+end)/2,", "left count += 1 if (count == k): return root return kthSmallestInBST(root.right, k)", "left or right is not a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight()))", "newNode = Node() if(not newNode) : print(\"Memory Error\") return if(left == right): newNode.data", "leftSum < 0 and rightSum < 0: self.maxValue = max(self.maxValue, root.val) return root.val", "0, len(A) - 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def", "= left root.right = SortedListToBST(ll, mid + 1, end) return root def convertSortedListToBST(ll,", "(not root): return None left = kthSmallestInBST(root.left, k) if (left): return left count", "> root.get_data()) return 0 # false if the min of the right is", "this function. p = head #We can use two-pointer logic to find the", "None and FindMin(root.getRight()) < root.get_data()) return 0 # false if, recursively, the left", "right is not a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0", "# false if the max of the left is > than root if", "count if (not root): return None left = kthSmallestInBST(root.left, k) if (left): return", "as (start+end)/2, avoids overflow mid = start + (end - start) // 2", "tree def IsBST3(root): if root == None: return 1 # false if the", "IsBST3(root.getRight())) return 0 # passing all that, it's a BST return 1 #METHOD", "right) return newNode if __name__ == \"__main__\": # create the sample BST A", "+ (end - start) // 2 left = SortedListToBST(ll, start, mid - 1)", ": narasimha class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue", "previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next): return head temp =", "- 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid", "None else : mid = left + (right - left) / 2 newNode.data", "Linked Lists chapter for this function. 
p = head #We can use two-pointer", "not head.next): return head temp = FindMiddleNode(head) # Refer Linked Lists chapter for", "BuildBST(A, left, right) : if(left > right): return None newNode = Node() if(not", "end): if(start > end): return None # same as (start+end)/2, avoids overflow mid", "while(p.next != temp): p = p.next p.next = None q = temp.next temp.next", "2 newNode.data = A[mid] newNode.left = BuildBST(A, left, mid - 1) newNode.right =", "isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next): return head temp", "None: return 1 # false if the max of the left is >", "newNode.left = BuildBST(A, left, mid - 1) newNode.right = BuildBST(A, mid + 1,", "(root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return 0 # false if, recursively,", "< lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head):", "root if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return 0 # false", "2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1 if not isBST4(root.getLeft(),", "BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0", "return newNode if __name__ == \"__main__\": # create the sample BST A =", "return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1", "the sample BST A = [2, 3, 4, 5, 6, 7] root =", "#------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\")", "None newNode.right = None else : mid = left + (right - left)", "1, right) return newNode if __name__ == \"__main__\": # create the sample BST", "left = SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left =", ": return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class", "= SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left", "root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM", "left is > than root if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data())", "previousValue=[NEG_INFINITY]): if root is None: return 1 if not isBST4(root.getLeft(), previousValue): return False", "recursively, the left or right is not a BST if (not IsBST3(root.getLeft()) or", "left = kthSmallestInBST(root.left, k) if (left): return left count += 1 if (count", "< 0: self.maxValue = max(self.maxValue, root.val) return root.val if leftSum > 0 and", "return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head", "root.val) return root.val if leftSum > 0 and rightSum > 0: self.maxValue =", "print(\"Memory Error\") return if(left == right): newNode.data = A[left] newNode.left = None newNode.right", "that, it's a BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root", "0 # passing all that, it's a BST return 1 #METHOD 2 def", "return 1 # false if the max of the left is > than", "Refer Linked Lists chapter for this 
function. p = head #We can use", "# false if the min of the right is <= than root if", "of the left is > than root if (root.getLeft() != None and FindMax(root.getLeft())", "newNode.right = None else : mid = left + (right - left) /", "is a binary search tree def IsBST3(root): if root == None: return 1", "# Returns true if a binary tree is a binary search tree def", "start + (end - start) // 2 left = SortedListToBST(ll, start, mid -", "newNode.right = BuildBST(A, mid + 1, right) return newNode if __name__ == \"__main__\":", "4, 5, 6, 7] root = BuildBST(A, 0, len(A) - 1) print \"\\ncreating", "1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global", "= BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid + 1, end)", "rightSum > 0: self.maxValue = max(self.maxValue, root.val + leftSum + rightSum) maxValueUp =", "Lists chapter for this function. p = head #We can use two-pointer logic", "Returns true if a binary tree is a binary search tree def IsBST3(root):", "= self.maxPathSumRec(root.right) if leftSum < 0 and rightSum < 0: self.maxValue = max(self.maxValue,", "if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing all that, it's", "return temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left > right): return None", "def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if", "mid = left + (right - left) / 2 newNode.data = A[mid] newNode.left", "mid = start + (end - start) // 2 left = SortedListToBST(ll, start,", "the max of the left is > than root if (root.getLeft() != None", "root if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return 0 # false", "the left is > than root if (root.getLeft() != None and FindMax(root.getLeft()) >", "return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end): return None", "left root.right = SortedListToBST(ll, mid + 1, end) return root def convertSortedListToBST(ll, n)", "root.get_data() < lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def", "right): return None newNode = Node() if(not newNode) : print(\"Memory Error\") return if(left", "a binary tree is a binary search tree def IsBST3(root): if root ==", "0 and rightSum > 0: self.maxValue = max(self.maxValue, root.val + leftSum + rightSum)", "FindMin(root.getRight()) < root.get_data()) return 0 # false if, recursively, the left or right", "= temp.next temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp", "None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0", "// 2 left = SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg()", "temp): p = p.next p.next = None q = temp.next temp.next = None", "and FindMin(root.getRight()) < root.get_data()) return 0 # false if, recursively, the left or", "BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return", "temp = FindMiddleNode(head) # Refer Linked Lists chapter for this function. 
p =", "not a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing", "= 0 def kthSmallestInBST(root, k): global count if (not root): return None left", "kthSmallestInBST(root.left, k) if (left): return left count += 1 if (count == k):", "SortedListToBST(ll, mid + 1, end) return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll,", "FindMiddleNode(head) # Refer Linked Lists chapter for this function. p = head #We", "\"__main__\": # create the sample BST A = [2, 3, 4, 5, 6,", "> than root if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return 0", "= float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root == None: return", "head or not head.next): return head temp = FindMiddleNode(head) # Refer Linked Lists", "if(left > right): return None newNode = Node() if(not newNode) : print(\"Memory Error\")", "False if root.get_data() < lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue)", "if (left): return left count += 1 if (count == k): return root", "A[mid] newNode.left = BuildBST(A, left, mid - 1) newNode.right = BuildBST(A, mid +", "= left + (right - left) / 2 newNode.data = A[mid] newNode.left =", "return False if root.get_data() < lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(),", "else : mid = left + (right - left) / 2 newNode.data =", "None # same as (start+end)/2, avoids overflow mid = start + (end -", "logic to find the middle node while(p.next != temp): p = p.next p.next", "true if a binary tree is a binary search tree def IsBST3(root): if", "it's a BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is", "false if the min of the right is <= than root if (root.getRight()", "(root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return 0 # false if the", "- 1) newNode.right = BuildBST(A, mid + 1, right) return newNode if __name__", "the middle node while(p.next != temp): p = p.next p.next = None q", "<= than root if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return 0", "function. 
p = head #We can use two-pointer logic to find the middle", "[2, 3, 4, 5, 6, 7] root = BuildBST(A, 0, len(A) - 1)", "(start+end)/2, avoids overflow mid = start + (end - start) // 2 left", "1) newNode.right = BuildBST(A, mid + 1, right) return newNode if __name__ ==", "kthSmallestInBST(root, k): global count if (not root): return None left = kthSmallestInBST(root.left, k)", "print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global count", "# false if, recursively, the left or right is not a BST if", "create the sample BST A = [2, 3, 4, 5, 6, 7] root", "SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def", "A[left] newNode.left = None newNode.right = None else : mid = left +", "left) / 2 newNode.data = A[mid] newNode.left = BuildBST(A, left, mid - 1)", "newNode.data = A[mid] newNode.left = BuildBST(A, left, mid - 1) newNode.right = BuildBST(A,", "end): return None # same as (start+end)/2, avoids overflow mid = start +", "if (count == k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start,", "def DLLtoBalancedBST(head): if(not head or not head.next): return head temp = FindMiddleNode(head) #", "!= None and FindMax(root.getLeft()) > root.get_data()) return 0 # false if the min", "temp.next temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #----------------------------------------------------------", "if(not newNode) : print(\"Memory Error\") return if(left == right): newNode.data = A[left] newNode.left", "all that, it's a BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if", "maxPathSumRec(self, root): if root == None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum =", "isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]: return 0 previousValue = root.get_data()", "0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and rightSum", "BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing all that,", "def BuildBST(A, left, right) : if(left > right): return None newNode = Node()", "the right is <= than root if (root.getRight() != None and FindMin(root.getRight()) <", "IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing all that, it's a BST", "= max(self.maxValue, root.val + leftSum + rightSum) maxValueUp = max(leftSum, rightSum) + root.val", "float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root == None: return 0", "self.maxValue def maxPathSumRec(self, root): if root == None: return 0 leftSum = self.maxPathSumRec(root.left)", "None left = kthSmallestInBST(root.left, k) if (left): return left count += 1 if", "= self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and rightSum < 0:", "the min of the right is <= than root if (root.getRight() != None", "the left or right is not a BST if (not IsBST3(root.getLeft()) or not", "is > than root if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return", "return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next): return head", "None q = temp.next temp.next = None temp.prev = 
DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q)", "/ 2 newNode.data = A[mid] newNode.left = BuildBST(A, left, mid - 1) newNode.right", "a binary search tree def IsBST3(root): if root == None: return 1 #", "root.val if leftSum > 0 and rightSum > 0: self.maxValue = max(self.maxValue, root.val", "kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end): return None #", "and rightSum > 0: self.maxValue = max(self.maxValue, root.val + leftSum + rightSum) maxValueUp", "middle node while(p.next != temp): p = p.next p.next = None q =", "if not isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]: return 0 previousValue", "BuildBST(A, mid + 1, right) return newNode if __name__ == \"__main__\": # create", "return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and", "FindMax(root.getLeft()) > root.get_data()) return 0 # false if the min of the right", "(end - start) // 2 left = SortedListToBST(ll, start, mid - 1) root", "is None: return 1 if not isBST4(root.getLeft(), previousValue): return False if root.get_data() <", "max(self.maxValue, root.val + leftSum + rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue", "two-pointer logic to find the middle node while(p.next != temp): p = p.next", "q = temp.next temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return", "false if the max of the left is > than root if (root.getLeft()", "+ 1, right) return newNode if __name__ == \"__main__\": # create the sample", "= None else : mid = left + (right - left) / 2", "a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing all", "def IsBST3(root): if root == None: return 1 # false if the max", "count += 1 if (count == k): return root return kthSmallestInBST(root.right, k) #-------------------------------------------------------", "#We can use two-pointer logic to find the middle node while(p.next != temp):", "if, recursively, the left or right is not a BST if (not IsBST3(root.getLeft())", "left, right) : if(left > right): return None newNode = Node() if(not newNode)", "#------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global count if (not root): return", "if (not root): return None left = kthSmallestInBST(root.left, k) if (left): return left", "(right - left) / 2 newNode.data = A[mid] newNode.left = BuildBST(A, left, mid", "= p.next p.next = None q = temp.next temp.next = None temp.prev =", "printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global count if (not root):", "if the min of the right is <= than root if (root.getRight() !=", "= start + (end - start) // 2 left = SortedListToBST(ll, start, mid", "SortedListToBST(ll, start, end): if(start > end): return None # same as (start+end)/2, avoids", "to find the middle node while(p.next != temp): p = p.next p.next =", ": print(\"Memory Error\") return if(left == right): newNode.data = A[left] newNode.left = None", "< 0 and rightSum < 0: self.maxValue = max(self.maxValue, root.val) return root.val if", "< root.get_data()) return 0 # false if, recursively, the left or right is", "1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1 if", "= DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def 
BuildBST(A, left, right) :", "6, 7] root = BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\" printBST(root)", "newNode.left = None newNode.right = None else : mid = left + (right", "temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def", "= root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next):", "root == None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum", "mid + 1, end) return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0,", "!= None and FindMin(root.getRight()) < root.get_data()) return 0 # false if, recursively, the", "overflow mid = start + (end - start) // 2 left = SortedListToBST(ll,", "0, n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def maxPathSum(self,", "self.maxValue = max(self.maxValue, root.val) return root.val if leftSum > 0 and rightSum >", "start) // 2 left = SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data)", "than root if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return 0 #", "max of the left is > than root if (root.getLeft() != None and", "> 0: self.maxValue = max(self.maxValue, root.val + leftSum + rightSum) maxValueUp = max(leftSum,", "not IsBST3(root.getRight())) return 0 # passing all that, it's a BST return 1", "self.maxPathSumRec(root.right) if leftSum < 0 and rightSum < 0: self.maxValue = max(self.maxValue, root.val)", "# create the sample BST A = [2, 3, 4, 5, 6, 7]", "head temp = FindMiddleNode(head) # Refer Linked Lists chapter for this function. 
p", "mid - 1) newNode.right = BuildBST(A, mid + 1, right) return newNode if", "== right): newNode.data = A[left] newNode.left = None newNode.right = None else :", "previousValue): return False if root.get_data() < lastNode[0]: return 0 previousValue = root.get_data() return", "= head #We can use two-pointer logic to find the middle node while(p.next", "(count == k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end):", "root == None: return 1 # false if the max of the left", "return None newNode = Node() if(not newNode) : print(\"Memory Error\") return if(left ==", "if __name__ == \"__main__\": # create the sample BST A = [2, 3,", "0 and rightSum < 0: self.maxValue = max(self.maxValue, root.val) return root.val if leftSum", "0 # false if, recursively, the left or right is not a BST", "start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right =", "- 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k):", "#----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next): return head temp = FindMiddleNode(head)", "of the right is <= than root if (root.getRight() != None and FindMin(root.getRight())", "BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid + 1, end) return", "None: return 1 if not isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]:", "if leftSum < 0 and rightSum < 0: self.maxValue = max(self.maxValue, root.val) return", "if leftSum > 0 and rightSum > 0: self.maxValue = max(self.maxValue, root.val +", "root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not head.next): return", "return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end):", "return self.maxValue def maxPathSumRec(self, root): if root == None: return 0 leftSum =", "if(not head or not head.next): return head temp = FindMiddleNode(head) # Refer Linked", "right) : if(left > right): return None newNode = Node() if(not newNode) :", "class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self,", "= None q = temp.next temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next =", "DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left", "lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not", "1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def maxPathSum(self, root): self.maxValue =", "+ leftSum + rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue = max(self.maxValue,", "avoids overflow mid = start + (end - start) // 2 left =", "- start) // 2 left = SortedListToBST(ll, start, mid - 1) root =", "k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end): return None # same", "(not 
IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 # passing all that, it's a", "+ 1, end) return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n", "or not IsBST3(root.getRight())) return 0 # passing all that, it's a BST return", "root is None: return 1 if not isBST4(root.getLeft(), previousValue): return False if root.get_data()", "root.right = SortedListToBST(ll, mid + 1, end) return root def convertSortedListToBST(ll, n) :", "- 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def maxPathSum(self, root): self.maxValue", "= BuildBST(A, left, mid - 1) newNode.right = BuildBST(A, mid + 1, right)", "convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96 :", "find the middle node while(p.next != temp): p = p.next p.next = None", "self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root == None:", "== k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start", "= A[left] newNode.left = None newNode.right = None else : mid = left", "__name__ == \"__main__\": # create the sample BST A = [2, 3, 4,", "None and FindMax(root.getLeft()) > root.get_data()) return 0 # false if the min of", "BST A = [2, 3, 4, 5, 6, 7] root = BuildBST(A, 0,", "is not a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return 0 #", "return 0 # false if, recursively, the left or right is not a", "1 if not isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]: return 0", "> 0 and rightSum > 0: self.maxValue = max(self.maxValue, root.val + leftSum +", "start, end): if(start > end): return None # same as (start+end)/2, avoids overflow", "+ rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue = max(self.maxValue, maxValueUp) return", "1 # false if the max of the left is > than root", "= kthSmallestInBST(root.left, k) if (left): return left count += 1 if (count ==", "binary search tree def IsBST3(root): if root == None: return 1 # false", "not isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]: return 0 previousValue =", "#------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end): return None # same as", "is <= than root if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return", "DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left > right): return", "#PROBLEM 96 : narasimha class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root)", "#METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1 if not", "return None left = kthSmallestInBST(root.left, k) if (left): return left count += 1", "None newNode = Node() if(not newNode) : print(\"Memory Error\") return if(left == right):", "newNode.data = A[left] newNode.left = None newNode.right = None else : mid =", "self.maxValue = max(self.maxValue, root.val + leftSum + rightSum) maxValueUp = max(leftSum, rightSum) +", "return 0 # false if the min of the right is <= than", "k) if (left): return left count += 1 if (count == k): return", "ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid + 1, end) return root", "if root is None: 
return 1 if not isBST4(root.getLeft(), previousValue): return False if", "min of the right is <= than root if (root.getRight() != None and", "return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------", "= BuildBST(A, mid + 1, right) return newNode if __name__ == \"__main__\": #", "return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer:", "0: self.maxValue = max(self.maxValue, root.val) return root.val if leftSum > 0 and rightSum", "n) : return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha", "mid + 1, right) return newNode if __name__ == \"__main__\": # create the", "return 1 if not isBST4(root.getLeft(), previousValue): return False if root.get_data() < lastNode[0]: return", "!= temp): p = p.next p.next = None q = temp.next temp.next =", "+ (right - left) / 2 newNode.data = A[mid] newNode.left = BuildBST(A, left,", "isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1 if not isBST4(root.getLeft(), previousValue): return", "root = BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count", "previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or not", "rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue = max(self.maxValue, maxValueUp) return maxValueUp", "p = p.next p.next = None q = temp.next temp.next = None temp.prev", "newNode) : print(\"Memory Error\") return if(left == right): newNode.data = A[left] newNode.left =", "k): global count if (not root): return None left = kthSmallestInBST(root.left, k) if", "max(self.maxValue, root.val) return root.val if leftSum > 0 and rightSum > 0: self.maxValue", "p.next = None q = temp.next temp.next = None temp.prev = DLLtoBalancedBST(head) temp.next", "= SortedListToBST(ll, mid + 1, end) return root def convertSortedListToBST(ll, n) : return", "0 def kthSmallestInBST(root, k): global count if (not root): return None left =", "if(left == right): newNode.data = A[left] newNode.left = None newNode.right = None else", "Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root):", "newNode if __name__ == \"__main__\": # create the sample BST A = [2,", "global count if (not root): return None left = kthSmallestInBST(root.left, k) if (left):", "if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()) return 0 # false if", "self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root == None: return 0 leftSum", "def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n - 1) #------------------------------------------------------ #PROBLEM 96", "1 if (count == k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll,", "binary tree is a binary search tree def IsBST3(root): if root == None:", "chapter for this function. 
p = head #We can use two-pointer logic to", "if(start > end): return None # same as (start+end)/2, avoids overflow mid =", "7] root = BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\" printBST(root) #-------------------------------------------------------", "root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start > end): return", "(left): return left count += 1 if (count == k): return root return", "end) return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n - 1)", "A = [2, 3, 4, 5, 6, 7] root = BuildBST(A, 0, len(A)", "96 : narasimha class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return", "# Refer Linked Lists chapter for this function. p = head #We can", "if root == None: return 1 # false if the max of the", "= DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left > right):", "if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return 0 # false if,", "== None: return 1 # false if the max of the left is", "leftSum > 0 and rightSum > 0: self.maxValue = max(self.maxValue, root.val + leftSum", "= [2, 3, 4, 5, 6, 7] root = BuildBST(A, 0, len(A) -", "right): newNode.data = A[left] newNode.left = None newNode.right = None else : mid", "mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll,", "narasimha class Answer: def maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def", "temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left >", "head.next): return head temp = FindMiddleNode(head) # Refer Linked Lists chapter for this", "# passing all that, it's a BST return 1 #METHOD 2 def isBST4(root,", "leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and rightSum <", "use two-pointer logic to find the middle node while(p.next != temp): p =", "= max(self.maxValue, root.val) return root.val if leftSum > 0 and rightSum > 0:", "2 left = SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left", "if the max of the left is > than root if (root.getLeft() !=", "rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and rightSum < 0: self.maxValue =", "== \"__main__\": # create the sample BST A = [2, 3, 4, 5,", "> right): return None newNode = Node() if(not newNode) : print(\"Memory Error\") return", "for this function. 
p = head #We can use two-pointer logic to find", ": if(left > right): return None newNode = Node() if(not newNode) : print(\"Memory", "passing all that, it's a BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]):", "root.val + leftSum + rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue =", "len(A) - 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root,", "can use two-pointer logic to find the middle node while(p.next != temp): p", "= None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A,", "root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid + 1,", "false if, recursively, the left or right is not a BST if (not", "0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #----------------------------------------------------------------------- def DLLtoBalancedBST(head): if(not head or", "p = head #We can use two-pointer logic to find the middle node", "sample BST A = [2, 3, 4, 5, 6, 7] root = BuildBST(A,", "or right is not a BST if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())) return", "node while(p.next != temp): p = p.next p.next = None q = temp.next", "or not head.next): return head temp = FindMiddleNode(head) # Refer Linked Lists chapter", "1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right = SortedListToBST(ll, mid +", "\"\\ncreating BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global count if", "0 # false if the min of the right is <= than root", "and rightSum < 0: self.maxValue = max(self.maxValue, root.val) return root.val if leftSum >", "= FindMiddleNode(head) # Refer Linked Lists chapter for this function. 
p = head", "return head temp = FindMiddleNode(head) # Refer Linked Lists chapter for this function.", "if root.get_data() < lastNode[0]: return 0 previousValue = root.get_data() return isBST4(root.getRight(), previousValue) #-----------------------------------------------------------------------", "maxPathSum(self, root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root", "k): return root return kthSmallestInBST(root.right, k) #------------------------------------------------------- def SortedListToBST(ll, start, end): if(start >", "left, mid - 1) newNode.right = BuildBST(A, mid + 1, right) return newNode", "tree is a binary search tree def IsBST3(root): if root == None: return", "p.next p.next = None q = temp.next temp.next = None temp.prev = DLLtoBalancedBST(head)", ": mid = left + (right - left) / 2 newNode.data = A[mid]", "SortedListToBST(ll, start, mid - 1) root = BSTNode(ll.head.data) ll.deleteBeg() root.left = left root.right", "def isBST4(root, previousValue=[NEG_INFINITY]): if root is None: return 1 if not isBST4(root.getLeft(), previousValue):", "n - 1) #------------------------------------------------------ #PROBLEM 96 : narasimha class Answer: def maxPathSum(self, root):", "leftSum + rightSum) maxValueUp = max(leftSum, rightSum) + root.val self.maxValue = max(self.maxValue, maxValueUp)", "= A[mid] newNode.left = BuildBST(A, left, mid - 1) newNode.right = BuildBST(A, mid", "self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum < 0 and rightSum < 0: self.maxValue", "return 0 # passing all that, it's a BST return 1 #METHOD 2", "Node() if(not newNode) : print(\"Memory Error\") return if(left == right): newNode.data = A[left]", "def maxPathSumRec(self, root): if root == None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum", "IsBST3(root): if root == None: return 1 # false if the max of", "BST\" printBST(root) #------------------------------------------------------- count = 0 def kthSmallestInBST(root, k): global count if (not", "root.left = left root.right = SortedListToBST(ll, mid + 1, end) return root def", "return if(left == right): newNode.data = A[left] newNode.left = None newNode.right = None", "root.get_data()) return 0 # false if, recursively, the left or right is not", "than root if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()) return 0 #", "0: self.maxValue = max(self.maxValue, root.val + leftSum + rightSum) maxValueUp = max(leftSum, rightSum)", "temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left, right)", "1, end) return root def convertSortedListToBST(ll, n) : return SortedListToBST(ll, 0, n -", "root.get_data()) return 0 # false if the min of the right is <=", "return left count += 1 if (count == k): return root return kthSmallestInBST(root.right,", "root): self.maxValue = float(\"-inf\") self.maxPathSumRec(root) return self.maxValue def maxPathSumRec(self, root): if root ==", "def kthSmallestInBST(root, k): global count if (not root): return None left = kthSmallestInBST(root.left,", "right is <= than root if (root.getRight() != None and FindMin(root.getRight()) < root.get_data())", "Error\") return if(left == right): newNode.data = A[left] newNode.left = None newNode.right =", "> end): return None # same as (start+end)/2, avoids overflow mid = start", "a BST return 1 #METHOD 2 def isBST4(root, previousValue=[NEG_INFINITY]): if 
root is None:", "search tree def IsBST3(root): if root == None: return 1 # false if", "= BuildBST(A, 0, len(A) - 1) print \"\\ncreating BST\" printBST(root) #------------------------------------------------------- count =", "= Node() if(not newNode) : print(\"Memory Error\") return if(left == right): newNode.data =", "return None # same as (start+end)/2, avoids overflow mid = start + (end", "temp #---------------------------------------------------------- def BuildBST(A, left, right) : if(left > right): return None newNode", "rightSum < 0: self.maxValue = max(self.maxValue, root.val) return root.val if leftSum > 0", "DLLtoBalancedBST(head): if(not head or not head.next): return head temp = FindMiddleNode(head) # Refer", "== None: return 0 leftSum = self.maxPathSumRec(root.left) rightSum = self.maxPathSumRec(root.right) if leftSum <", "- left) / 2 newNode.data = A[mid] newNode.left = BuildBST(A, left, mid -", "same as (start+end)/2, avoids overflow mid = start + (end - start) //", "head #We can use two-pointer logic to find the middle node while(p.next !=", "# same as (start+end)/2, avoids overflow mid = start + (end - start)", "None temp.prev = DLLtoBalancedBST(head) temp.next = DLLtoBalancedBST(q) return temp #---------------------------------------------------------- def BuildBST(A, left,", "BuildBST(A, left, mid - 1) newNode.right = BuildBST(A, mid + 1, right) return" ]
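# --- Added usage sketch (not from the original source) ---------------------
# The snippets above assume node classes and helpers (Node, BSTNode, FindMin,
# FindMax, FindMiddleNode, printBST, NEG_INFINITY) defined elsewhere.  A minimal
# Node stand-in is enough to exercise BuildBST together with kthSmallestInBST:
class Node(object):
    def __init__(self):
        self.data = None
        self.left = None
        self.right = None

def demo_kth_smallest():
    global count
    count = 0                                   # kthSmallestInBST counts via a module-level global
    demo_root = BuildBST([1, 3, 5, 7, 9], 0, 4)
    third = kthSmallestInBST(demo_root, 3)      # 3rd smallest value of the sorted input -> 5
    return third.data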
[ "* W + b out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs", "b out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a:", "tf tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\",", "tensorflow.compat.v1 as tf tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W", "= tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b out_a = model(in_a) with", "import tensorflow.compat.v1 as tf tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"):", "tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b out_a = model(in_a) with tf.Session()", "<reponame>shantam21/Deep-Learning-with-TensorFlow-2-and-Keras<gh_stars>100-1000 import tensorflow.compat.v1 as tf tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with", "tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2)))", "model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x", "W + b out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs =", "= tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b =", "b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b out_a = model(in_a)", "out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1,", "initializer=tf.zeros(shape=(2))) return x * W + b out_a = model(in_a) with tf.Session() as", "def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return", "initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b out_a =", "model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1, 0]}) writer", "tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\",", "= model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1, 0]})", "+ b out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a],", "W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W +", "as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1, 0]}) writer = tf.summary.FileWriter(\"./logs/example\", sess.graph)", "= tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b", "as tf tf.disable_v2_behavior() in_a = tf.placeholder(dtype=tf.float32, shape=(2)) 
def model(x): with tf.variable_scope(\"matmul\"): W =", "with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x *", "tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W + b out_a", "x * W + b out_a = model(in_a) with tf.Session() as sess: sess.run(tf.global_variables_initializer())", "with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1, 0]}) writer =", "tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2))) return x * W", "in_a = tf.placeholder(dtype=tf.float32, shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b", "return x * W + b out_a = model(in_a) with tf.Session() as sess:", "shape=(2)) def model(x): with tf.variable_scope(\"matmul\"): W = tf.get_variable(\"W\", initializer=tf.ones(shape=(2,2))) b = tf.get_variable(\"b\", initializer=tf.zeros(shape=(2)))", "tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a], feed_dict={in_a: [1, 0]}) writer = tf.summary.FileWriter(\"./logs/example\"," ]
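# --- Added comparison sketch (not from the original source) ----------------
# The FileWriter call above dumps the graph for TensorBoard; it can be viewed
# with: tensorboard --logdir ./logs/example
#
# For comparison, a minimal sketch of the same elementwise x * W + b computation
# in idiomatic TF2 (eager execution, tf.Variable, no placeholders or sessions).
# It is left commented out because disable_v2_behavior() above turns eager
# execution off for this process; run it in a fresh TF2 program instead.
#
# import tensorflow as tf
#
# W = tf.Variable(tf.ones(shape=(2, 2)), name="W")
# b = tf.Variable(tf.zeros(shape=(2,)), name="b")
#
# @tf.function
# def model_v2(x):
#     return x * W + b
#
# print(model_v2(tf.constant([1.0, 0.0])))  # broadcasts to a (2, 2) result, as above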
[ "'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as", "len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] ==", "= mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value}", "import pandas as pd from unittest.mock import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as", "import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns", ") as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list", "unittest.mock import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager,", "patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from", "mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '',", "'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2,", "import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager", "Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self,", "module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module,", "assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1] == {'tic_id': 3, 'disposition':", "Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1] == {'tic_id': 3,", "== 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id':", "mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert", "3 
assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2,", "toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert", "mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list)", "with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as", "pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface,", ") as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions", "= TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions", "pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value:", "from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition,", "= pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with", "[]}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock )", "mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value =", "assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition':", "as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata:", "ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase):", "ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 
'toi_dispositions',", "call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition':", "'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list", "as pd from unittest.mock import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from", "as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert", "mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) ==", "test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value:", "from unittest.mock import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import", "[], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions',", "3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface,", "import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition", "ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions:", "class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager()", "TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def", "mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1]", "import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata')", "pd from unittest.mock import patch, Mock, PropertyMock 
import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager", "assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1]", "from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase')", "== {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert", "= pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [],", "2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with", "tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id': 1,", "patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions:", "pandas as pd from unittest.mock import patch, Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module", "TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions =", "'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value", "@patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1,", "= toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3", "Mock, PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface", "as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value", "def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3],", "= ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] ==", "with 
patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions", "'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value:", "new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list =", "patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value = toi_dispositions mock_ctoi_dispositions.return_value = ctoi_dispositions tess_transit_disposition_metadata_manager.build_table()", "mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP',", "'', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock )", "1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1] ==", "tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']})", "[1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []})", "call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value}", "['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [], ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock", "ToiColumns.disposition.value: []}) with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock", "new_callable=PropertyMock ) as mock_toi_dispositions: with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock ) as mock_ctoi_dispositions: mock_toi_dispositions.return_value =", "ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module,", "call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1] == {'tic_id': 3, 'disposition': Disposition.FALSE_POSITIVE.value}", "@patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, 
mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions =", "{'tic_id': 1, 'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1]", "'disposition': Disposition.CONFIRMED.value} assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value} assert call_args_list[2][1] == {'tic_id':", "ctoi_dispositions tess_transit_disposition_metadata_manager.build_table() call_args_list = mock_tess_target_transit_disposition.call_args_list assert len(call_args_list) == 3 assert call_args_list[0][1] == {'tic_id':", "PropertyMock import ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import", "ramjet.data_interface.tess_transit_metadata_manager as module from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition from ramjet.data_interface.tess_toi_data_interface import ToiColumns class", "TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager = TessTransitMetadataManager() toi_dispositions", "toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3], ToiColumns.disposition.value: ['KP', '', 'FP']}) ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value:", "ToiColumns class TestTessTransitMetadata: @patch.object(module, 'metadatabase') @patch.object(module, 'TessTransitMetadata') def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition, mock_metadatabase): tess_transit_disposition_metadata_manager =" ]
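# --- Added illustrative sketch (not from the original source) --------------
# A purely hypothetical helper showing the TOI-string -> Disposition mapping
# that the test above asserts ('KP' -> confirmed, '' -> candidate, 'FP' ->
# false positive).  This is NOT taken from ramjet's implementation; the name
# and any behaviour outside those three inputs are made up here.
def toi_string_to_disposition(toi_disposition_string):
    if toi_disposition_string == 'KP':
        return Disposition.CONFIRMED
    if toi_disposition_string == 'FP':
        return Disposition.FALSE_POSITIVE
    return Disposition.CANDIDATE  # the test treats the empty string as a candidate

assert toi_string_to_disposition('KP') is Disposition.CONFIRMED
assert toi_string_to_disposition('') is Disposition.CANDIDATE
assert toi_string_to_disposition('FP') is Disposition.FALSE_POSITIVE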
[ "name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout output file \\n\"", "str, bytes import fileinput import argparse import os import sys import subprocess python_path", "+\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c", "parser = argparse.ArgumentParser(description='A script to create slurm scripts from list of commands') parser.add_argument('-c',", "2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default:", "help='Number of command per node (default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help=", "+= 1 if i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode,", "commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1", "'w') as slurmFile: print(options, file = slurmFile) if concurrent_job == 1: print('bash %s'", "of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH", "+ '\\n', file = commandFile) return 0 def main(args): commandFile = args.cmdlst jobname", "slurm scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each", "parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)') parser.add_argument('-A', '--allocation',", "\\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH", "numberOfNode = args.numberOfNode allocation = args.allocation queue = args.queue time = args.time concurrent_job", "parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes',", "print(options, file = slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles), file =", "i += 1 if i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob,", "= args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue =", "open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0 def main(args):", "'--numberOfNode', default=1, type=int, help='Number of node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd',", "(default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job (default:", "gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with", "command per node (default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default:", "%i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s", "numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank", "concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J", "fileinput import argparse import os import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8')", "file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile)", "script to create slurm 
scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A list", "writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 i", "numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 i = 0 commandlist=[]", "many process to run in the same time (default: 24)', type=int) args =", "\"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file = slurmFile)", "sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname,", "print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0 def main(args): commandFile = args.cmdlst", "as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0 def main(args): commandFile", "commands = f.readlines() commandlist = [] i = 0 commandRank = 0 for", "(hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation)", "parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time (default: 24)', type=int)", "\"#SBATCH -n 24 # Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s", "args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue = args.queue", "= args.allocation queue = args.queue time = args.time concurrent_job = args.processes with open(commandFile,'r')", "__name__ == '__main__': parser = argparse.ArgumentParser(description='A script to create slurm scripts from list", "%(jobname) +\\ \"#SBATCH -N %i # Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH", "a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number", "default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job", "def main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode =", "name \\n\" %(jobname) +\\ \"#SBATCH -N %i # Total number of nodes \\n\"", "commands: commandlist.append(str(command).strip()) i += 1 if i % numberOfJob == 0: writeJob(commandlist, jobname,", "system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job):", "= args.time concurrent_job = args.processes with open(commandFile,'r') as f: commands = f.readlines() commandlist", "1 i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation,", "f: commands = f.readlines() commandlist = [] i = 0 commandRank = 0", "f.readlines() commandlist = [] i = 0 commandRank = 0 for command in", "import print_function from builtins import str, bytes import fileinput import argparse import os", "def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash'", "commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a command', required=True)", "is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int,", "'command_%i.bash' %commandRank 
options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job", "writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 print('Written", "= 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time,", "default=1, type=int, help='Number of node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1,", "if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script to create slurm scripts from", "import argparse import os import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path", "commandRank += 1 print('Written %i scripts' %commandRank, file = sys.stdout) return 0 if", "-t %s # Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule", "0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1", "line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1,", "os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles =", "# Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name", "number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\" %(queue)+\\", "'--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)') parser.add_argument('-A', '--allocation', default", "\\n\" +\\ \"#SBATCH -J %s # Job name \\n\" %(jobname) +\\ \"#SBATCH -N", "commandRank = 0 for command in commands: commandlist.append(str(command).strip()) i += 1 if i", "each line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode',", "# Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule", "+\\ \"#SBATCH -J %s # Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i", "of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of tasks %i\\n\"", "jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 print('Written %i", "tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o", "i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time,", "+= 1 i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode,", "= argparse.ArgumentParser(description='A script to create slurm scripts from list of commands') parser.add_argument('-c', '--cmdlst',", "list of command, each line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default:", "type=int, help='Number of command per node (default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz',", "default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time", "for command in commands: commandlist.append(str(command).strip()) i += 1 if i % numberOfJob ==", "slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel", "= {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', 
default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue',", "= commandFile) return 0 def main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob", "Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load", "[] i = 0 commandRank = 0 for command in commands: commandlist.append(str(command).strip()) i", "1 print('Written %i scripts' %commandRank, file = sys.stdout) return 0 if __name__ ==", "writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank", "command, each line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N',", "%s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export", "import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist,", "__future__ import print_function from builtins import str, bytes import fileinput import argparse import", "stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time (hh:mm:ss)", "load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm'", "%s' %(commandFiles), file = slurmFile) else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file", "'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time", "commandlist = [] i = 0 commandRank = 0 for command in commands:", "= sys.stdout) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script to", "'__main__': parser = argparse.ArgumentParser(description='A script to create slurm scripts from list of commands')", "allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\"", "for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per", "time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to", "(default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss)", "default=1, type=int, help='Number of command per node (default: 1)') parser.add_argument('-A', '--allocation', default =", "'2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00',", "commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0 def main(args): commandFile =", "print('Written %i scripts' %commandRank, file = sys.stdout) return 0 if __name__ == '__main__':", "parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for", "commandlist.append(str(command).strip()) i += 1 if i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank,", "default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') 
parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many", "file = slurmFile) else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile)", "1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in the", "'2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default:", "main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode", "args.numberOfNode allocation = args.allocation queue = args.queue time = args.time concurrent_job = args.processes", "Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout output file", "0 for command in commands: commandlist.append(str(command).strip()) i += 1 if i % numberOfJob", "== '__main__': parser = argparse.ArgumentParser(description='A script to create slurm scripts from list of", "jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options", "= f.readlines() commandlist = [] i = 0 commandRank = 0 for command", "help='Number of node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number", "%(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w')", "%system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file = slurmFile) if concurrent_job", "% numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job)", "(default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time (default:", "concurrent_job) commandRank += 1 i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank,", "default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in", "parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz',", "of commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a command',", "bytes import fileinput import argparse import os import sys import subprocess python_path =", "= args.queue time = args.time concurrent_job = args.processes with open(commandFile,'r') as f: commands", "return 0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script to create slurm", "Name of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run", "python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode,", "per node (default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)',", "%i # Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total", "'--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') 
parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How", "Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of", "concurrent_job = args.processes with open(commandFile,'r') as f: commands = f.readlines() commandlist = []", "\"#SBATCH -t %s # Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s", "= 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s #", "= slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return", "jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue", "+\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file =", "= subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation,", "%(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file =", "of command, each line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)')", "numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 i = 0 commandlist=[] if", "help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run", "\\ \"#SBATCH -t %s # Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A", "1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)') parser.add_argument('-A',", "1 if i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation,", "'--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each", "else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as", "as f: commands = f.readlines() commandlist = [] i = 0 commandRank =", "i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue,", "\"#SBATCH -J %s # Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i #", "default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t',", "24 # Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue", "numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank, file", "1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b',", "commandFile) return 0 def main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob =", "print_function from builtins import str, bytes import fileinput import argparse import os import", "args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue = args.queue time = args.time", "+\\ \"#SBATCH -N %i # Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n", "command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname 
(default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of", "= slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles), file = slurmFile) else:", "\\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\"", "0 def main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode", "commandFile = args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation", "default=24,help='How many process to run in the same time (default: 24)', type=int) args", "numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue = args.queue time", "parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job (default: 1)') parser.add_argument('-n',", "os import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def", "args.allocation queue = args.queue time = args.time concurrent_job = args.processes with open(commandFile,'r') as", "queue = args.queue time = args.time concurrent_job = args.processes with open(commandFile,'r') as f:", "job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job (default: 1)')", "%commandRank, file = sys.stdout) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A", "argparse.ArgumentParser(description='A script to create slurm scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A", "\\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time (hh:mm:ss) \\n\" %time +\\", "job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default:", "python from __future__ import print_function from builtins import str, bytes import fileinput import", "{'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue", "open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file = slurmFile) if concurrent_job == 1:", "concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank, file = sys.stdout) return 0", "= '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time',", "open(commandFile,'r') as f: commands = f.readlines() commandlist = [] i = 0 commandRank", "= args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation =", "with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0 def", "Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\"", "time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\"", "java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank),", "'\\n', file = commandFile) return 0 def main(args): commandFile = args.cmdlst jobname =", "queue, time, concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank, file = sys.stdout)", "of command per node (default: 1)') parser.add_argument('-A', '--allocation', 
default = '2013lambowitz', help= 'Account", "%commandRank options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job name", "file = commandFile) return 0 def main(args): commandFile = args.cmdlst jobname = args.jobname", "-n 24 # Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p %s #", "%s # Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i # Total number", "args.queue time = args.time concurrent_job = args.processes with open(commandFile,'r') as f: commands =", "normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time (default: 24)',", "options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job name \\n\"", "from list of commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each line is", "slurmFile: print(options, file = slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles), file", "#!/usr/bin/env python from __future__ import print_function from builtins import str, bytes import fileinput", "-p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of", "%time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit", "i = 0 commandRank = 0 for command in commands: commandlist.append(str(command).strip()) i +=", "parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a command', required=True) parser.add_argument('-j',", "with open(commandFile,'r') as f: commands = f.readlines() commandlist = [] i = 0", "-o %s.o%s # Name of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t", "0 commandRank = 0 for command in commands: commandlist.append(str(command).strip()) i += 1 if", "%s # Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH -A %s \\nmodule load", "+= 1 print('Written %i scripts' %commandRank, file = sys.stdout) return 0 if __name__", "if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank +=", "builtins import str, bytes import fileinput import argparse import os import sys import", "queue, time, concurrent_job) commandRank += 1 i = 0 commandlist=[] if commandlist: writeJob(commandlist,", "-j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist)", "file = sys.stdout) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script", "with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file = slurmFile) if concurrent_job ==", "%s.o%s # Name of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s", "\\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file", "\\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout output file \\n\" %(jobname,'%j')+", "to run in the same time (default: 24)', type=int) args = parser.parse_args() main(args)", "in commands: commandlist.append(str(command).strip()) i += 1 if i % numberOfJob == 0: writeJob(commandlist,", "commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank", "scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each line", "create slurm scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A list of command,", "-J %s # Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i # Total", "commandRank, numberOfJob, numberOfNode, allocation, queue, time, 
concurrent_job) commandRank += 1 i = 0", "help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process", "import fileinput import argparse import os import sys import subprocess python_path = subprocess.check_output(['which'", "subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue,", "numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash", "'--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'})", "\"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job name \\n\" %(jobname) +\\ \"#SBATCH", "# Name of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s #", "required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)') parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node", "== 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank +=", "return 0 def main(args): commandFile = args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd", "= args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation queue = args.queue time =", "= 0 for command in commands: commandlist.append(str(command).strip()) i += 1 if i %", "of node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of", "file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time (hh:mm:ss) \\n\" %time", "-c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options,", "%i scripts' %commandRank, file = sys.stdout) return 0 if __name__ == '__main__': parser", "= args.processes with open(commandFile,'r') as f: commands = f.readlines() commandlist = [] i", "each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node", "commandRank += 1 i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i,", ",'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time,", "nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of tasks %i\\n\" %(numberOfJob)+\\", "parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run in the same", "= os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles", "%(commandRank), 'w') as slurmFile: print(options, file = slurmFile) if concurrent_job == 1: print('bash", "of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time", "commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options =", "to create slurm scripts from list of commands') parser.add_argument('-c', '--cmdlst', help='A list of", "print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel -j%i :::: %s 
\\n' %(concurrent_job,commandFiles),", "list of commands') parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a", "number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of tasks", "type=int, help='Number of node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int,", "sys.stdout) return 0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script to create", "load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path", "help='A list of command, each line is a command', required=True) parser.add_argument('-j', '--jobname', default='job',help='Jobname", "\"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n'", "concurrent_job == 1: print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel -j%i ::::", "import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank,", ":::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) +", "\\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job name \\n\" %(jobname) +\\", "= 0 commandRank = 0 for command in commands: commandlist.append(str(command).strip()) i += 1", "process to run in the same time (default: 24)', type=int) args = parser.parse_args()", "%(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout output file \\n\" %(jobname,'%j')+ \\", "slurmFile) else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w')", "%(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time (hh:mm:ss) \\n\" %time +\\ \"#SBATCH", "commandRank, i, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 print('Written %i scripts'", "from builtins import str, bytes import fileinput import argparse import os import sys", "from __future__ import print_function from builtins import str, bytes import fileinput import argparse", "# Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i # Total number of", "args.time concurrent_job = args.processes with open(commandFile,'r') as f: commands = f.readlines() commandlist =", "allocation, queue, time, concurrent_job) commandRank += 1 i = 0 commandlist=[] if commandlist:", "time = args.time concurrent_job = args.processes with open(commandFile,'r') as f: commands = f.readlines()", "node for each job (default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command", "allocation, queue, time, concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank, file =", "file = slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles), file = slurmFile)", "subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path) def writeJob(commandlist, jobname, commandRank, numberOfJob,", "scripts' %commandRank, file = sys.stdout) return 0 if __name__ == '__main__': parser =", "\"#SBATCH -o %s.o%s # Name of stdout output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH", "= \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s # Job name \\n\" %(jobname)", "-N %i # Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 #", "\"#SBATCH -p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name", "\\n\" %(jobname) +\\ \"#SBATCH -N %i # Total number of nodes \\n\" 
%(numberOfNode)+\\", "\\n\" %time +\\ \"#SBATCH -A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\", "%s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout", "%(numberOfJob)+\\ \"#SBATCH -p %s # Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s #", "# Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24 # Total number", "== 1: print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel -j%i :::: %s", "'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile:", "Job name \\n\" %(jobname) +\\ \"#SBATCH -N %i # Total number of nodes", "# Queue name \\n\" %(queue)+\\ \"#SBATCH -o %s.o%s # Name of stdout output", "time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH", "argparse import os import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path =", "print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile:", "numberOfJob, numberOfNode, allocation, queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\", "\"#SBATCH -N %i # Total number of nodes \\n\" %(numberOfNode)+\\ \"#SBATCH -n 24", "queue, time, concurrent_job): commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\" +\\", "1: print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel -j%i :::: %s \\n'", "(default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices =", "= [] i = 0 commandRank = 0 for command in commands: commandlist.append(str(command).strip())", "choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00')", "0 if __name__ == '__main__': parser = argparse.ArgumentParser(description='A script to create slurm scripts", "%(commandFiles), file = slurmFile) else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file =", "PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file = slurmFile) if", "(default: 1)') parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)')", "-A %s \\nmodule load gcc\\nmodule load java\\n\" %(allocation) +\\ 'ulimit -c unlimited\\n' +\\", "jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 i =", "%s \\n' %(concurrent_job,commandFiles), file = slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n',", "output file \\n\" %(jobname,'%j')+ \\ \"#SBATCH -t %s # Run time (hh:mm:ss) \\n\"", "args.processes with open(commandFile,'r') as f: commands = f.readlines() commandlist = [] i =", "time, concurrent_job) commandRank += 1 i = 0 commandlist=[] if commandlist: writeJob(commandlist, jobname,", "+\\ 'ulimit -c unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as", "node (default: 1)') parser.add_argument('-A', '--allocation', default = '2013lambowitz', help= 'Account (default: 2013lambowitz)', choices", "import str, bytes import fileinput import argparse import os import sys import subprocess", "command in commands: commandlist.append(str(command).strip()) i += 1 if i % 
numberOfJob == 0:", "as slurmFile: print(options, file = slurmFile) if concurrent_job == 1: print('bash %s' %(commandFiles),", "= slurmFile) else: print('parallel -j%i :::: %s \\n' %(concurrent_job,commandFiles), file = slurmFile) with", "commandFiles = 'command_%i.bash' %commandRank options = \\ \"#!/bin/bash \\n\" +\\ \"#SBATCH -J %s", "if concurrent_job == 1: print('bash %s' %(commandFiles), file = slurmFile) else: print('parallel -j%i", "i, numberOfNode, allocation, queue, time, concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank,", "unlimited\\n' +\\ \"export PATH=%s:$PATH\" %system_path with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile: print(options, file", "(hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)') parser.add_argument('-p','--processes', default=24,help='How many process to run", "%(numberOfNode)+\\ \"#SBATCH -n 24 # Total number of tasks %i\\n\" %(numberOfJob)+\\ \"#SBATCH -p", "time, concurrent_job) commandRank += 1 print('Written %i scripts' %commandRank, file = sys.stdout) return", "args.cmdlst jobname = args.jobname numberOfJob = args.numberOfCmd numberOfNode = args.numberOfNode allocation = args.allocation", "allocation = args.allocation queue = args.queue time = args.time concurrent_job = args.processes with", "'--cmdlst', help='A list of command, each line is a command', required=True) parser.add_argument('-j', '--jobname',", "'Exosome-RNA-seq'}) parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00') parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)')", "0 commandlist=[] if commandlist: writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job)", "= args.numberOfNode allocation = args.allocation queue = args.queue time = args.time concurrent_job =", "if i % numberOfJob == 0: writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue,", "slurmFile) with open(commandFiles,'w') as commandFile: print('\\n'.join(commandlist) + '\\n', file = commandFile) return 0", "import os import sys import subprocess python_path = subprocess.check_output(['which' ,'python']).decode('utf-8') system_path = os.path.dirname(python_path)" ]
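The generator above is driven entirely by its command-line flags, so the quickest sanity check is to feed it a small command list and inspect the launcher_*.slurm and command_*.bash files it writes. The sketch below is only an illustration: the file name makeSlurm.py and the demo commands are assumptions, not part of the source; the flags themselves match the argparse options defined above.

# Minimal driver for the SLURM-script generator above.
# Assumption: the generator is saved as 'makeSlurm.py' (hypothetical name).
import subprocess

# One shell command per line; with -n 2, every 2 commands become one job.
with open('commands.txt', 'w') as cmd_list:
    for sample in ['s1', 's2', 's3', 's4']:
        cmd_list.write('echo processing %s\n' % sample)

subprocess.check_call([
    'python', 'makeSlurm.py',   # hypothetical file name for the script above
    '-c', 'commands.txt',       # command list (required)
    '-j', 'demo',               # job name
    '-n', '2',                  # 2 commands per job -> launcher_0.slurm, launcher_1.slurm
    '-t', '00:30:00',           # run time (hh:mm:ss)
    '-q', 'normal',             # queue
])
# Each launcher_<rank>.slurm runs its command_<rank>.bash, serially via bash when
# --processes is 1, otherwise through GNU parallel with -j<processes>.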
[ "\"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2])", "self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms = ['peanut',", "# Based on AboutArrays in the Ruby Koans # from runner.koan import *", "colon to slice a list # list = [start:<end:step] \"\"\" noms = ['peanut',", "a colon to slice a list # list = [start:<end:step] \"\"\" noms =", "= [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2])", "'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0])", "= ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3])", "'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__,", "# -*- coding: utf-8 -*- # # Based on AboutArrays in the Ruby", "['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__,", "utf-8 -*- # # Based on AboutArrays in the Ruby Koans # from", "runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly']", "python # -*- coding: utf-8 -*- # # Based on AboutArrays in the", "import * class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__,", "Based on AboutArrays in the Ruby Koans # from runner.koan import * class", "noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0])", "self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step]", "-*- coding: utf-8 -*- # # Based on AboutArrays in the Ruby Koans", "\"\"\" # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__,", "noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list =", "noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to slice", "Use a colon to slice a list # list = [start:<end:step] \"\"\" noms", "noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to slice a list", "self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms", "test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly']", "AboutArrays in the Ruby Koans # from runner.koan import * class AboutLists(Koan): def", "slice a list # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and',", "self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, 
noms[5:0]) def test_slicing_to_the_edge(self): \"\"\"", "def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__,", "noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to slice a list # list", "= ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20])", "noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" #", "'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100])", "Koans # from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut',", "def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and',", "self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__,", "list # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__,", "'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self):", "self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to", "noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__,", "def test_slicing_lists(self): \"\"\" Use a colon to slice a list # list =", "-*- # # Based on AboutArrays in the Ruby Koans # from runner.koan", "test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1])", "self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def", "['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def", "noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__, noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__,", "Ruby Koans # from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms =", "list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) self.assertEqual(__,", "from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and',", "in the Ruby Koans # from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self):", "'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use", "[start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1]) 
self.assertEqual(__, noms[0:2]) self.assertEqual(__,", "self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to slice a list #", "self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a", "# from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter',", "on AboutArrays in the Ruby Koans # from runner.koan import * class AboutLists(Koan):", "# list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0:1])", "= [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[2:]) self.assertEqual(__, noms[:2])", "# list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[2:])", "#!/usr/bin/env python # -*- coding: utf-8 -*- # # Based on AboutArrays in", "class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__,", "* class AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0])", "AboutLists(Koan): def test_accessing_list_elements(self): noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3])", "self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon to slice a", "self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list", "list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly'] self.assertEqual(__, noms[2:]) self.assertEqual(__,", "# # Based on AboutArrays in the Ruby Koans # from runner.koan import", "'and', 'jelly'] self.assertEqual(__, noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\"", "to slice a list # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter',", "the Ruby Koans # from runner.koan import * class AboutLists(Koan): def test_accessing_list_elements(self): noms", "noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms =", "test_slicing_lists(self): \"\"\" Use a colon to slice a list # list = [start:<end:step]", "a list # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter', 'and', 'jelly']", "noms[0]) self.assertEqual(__, noms[3]) self.assertEqual(__, noms[-1]) self.assertEqual(__, noms[-3]) def test_slicing_lists(self): \"\"\" Use a colon", "noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\" noms = ['peanut', 'butter',", "noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self): \"\"\" # list = [start:<end:step] \"\"\"", "noms[0:2]) self.assertEqual(__, noms[2:2]) self.assertEqual(__, noms[2:20]) self.assertEqual(__, noms[4:0]) self.assertEqual(__, noms[4:100]) self.assertEqual(__, noms[5:0]) def test_slicing_to_the_edge(self):", "\"\"\" Use a colon to slice a list # list = [start:<end:step] \"\"\"", "<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- # # Based on AboutArrays", "coding: utf-8 -*- # # Based on AboutArrays in the Ruby Koans #" ]
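For reference while filling in the __ placeholders, the slices above evaluate as follows in plain Python (no Koan runner required); the detail worth noticing is that out-of-range slices clamp to the list bounds instead of raising IndexError.

# Reference answers for the slicing koans above (standard Python semantics).
noms = ['peanut', 'butter', 'and', 'jelly']

assert noms[0:1] == ['peanut']
assert noms[0:2] == ['peanut', 'butter']
assert noms[2:2] == []                     # start == end gives an empty slice
assert noms[2:20] == ['and', 'jelly']      # end is clamped to len(noms)
assert noms[4:0] == []                     # start at/after end gives an empty slice
assert noms[4:100] == []
assert noms[5:0] == []
assert noms[2:] == ['and', 'jelly']        # slice to the edge
assert noms[:2] == ['peanut', 'butter']
print('all slicing assertions hold')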
[ "return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age", "db_table = 'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return:", "60 * 60) # # return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市", "self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return { 'uid': self.id,", "default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256)", "max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location", "- self.birth_year age = age if date.month > self.birth_month and date.day > self.birth_day", "default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81)", "libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'), (2, '女'), )", "self.birth_year age = age if date.month > self.birth_month and date.day > self.birth_day else", ":return: # \"\"\" # key = 'token:{}'.format(self.id) # # token = cache.get(key) #", "(0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS = ( ('bj', '北京'), ('sh',", "models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz')", "self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, } class Meta: db_table = 'users'", "\"\"\" # 为用户生成唯一的 token # :return: # \"\"\" # key = 'token:{}'.format(self.id) #", "# \"\"\" # key = 'token:{}'.format(self.id) # # token = cache.get(key) # #", "# key = 'token:{}'.format(self.id) # # token = cache.get(key) # # if not", "birth_month 出生月 birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True)", "个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex =", "birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date", "= 'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return: #", "not token: # token = 'token........<PASSWORD>' # cache.set(key, token, 24 * 60 *", "location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age =", "age @property def profile(self): if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return", "= models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True) class", "choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age =", "'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, } class Meta: db_table =", "'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return: # \"\"\"", 
"'token........<PASSWORD>' # cache.set(key, token, 24 * 60 * 60) # # return token", "self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, } class", "ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex", "self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, } class Meta: db_table", "目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放", "token = cache.get(key) # # if not token: # token = 'token........<PASSWORD>' #", "avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex", "= models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18)", "if date.month > self.birth_month and date.day > self.birth_day else age-1 return age @property", "token # :return: # \"\"\" # key = 'token:{}'.format(self.id) # # token =", "if not token: # token = 'token........<PASSWORD>' # cache.set(key, token, 24 * 60", "if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self):", "'age': self.age, } class Meta: db_table = 'users' # def get_or_create_token(self): # \"\"\"", "'token:{}'.format(self.id) # # token = cache.get(key) # # if not token: # token", "('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname", "= models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age =", "= models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play", "token = 'token........<PASSWORD>' # cache.set(key, token, 24 * 60 * 60) # #", "24 * 60 * 60) # # return token class Profile(models.Model, ModelToDicMiXin): \"\"\"", "class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月", "最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS,", "models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS,", "sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1)", "\"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age", "('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum", "'女'), ) LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'),", "匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0)", "self.id, 'phonenum': self.phonenum, 'nickname': 
self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age,", "import ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS", "and date.day > self.birth_day else age-1 return age @property def profile(self): if not", "self.birth_month and date.day > self.birth_day else age-1 return age @property def profile(self): if", "# # if not token: # token = 'token........<PASSWORD>' # cache.set(key, token, 24", "'_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return { 'uid':", "'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age':", "'成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别", "unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month =", "SEXS = ( (0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS = (", "return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location':", "models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day =", "'广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年", "avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age", "ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS =", "= models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000)", ") LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd',", "nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1)", "( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'),", "location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age = date.year -", "= models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater", "date = datetime.date.today() age = date.year - self.birth_year age = age if date.month", "'未知'), (1, '男'), (2, '女'), ) LOCATIONS = ( ('bj', '北京'), ('sh', '上海'),", "('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), )", "性别 birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum", "birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property", "models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater =", "min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = 
models.IntegerField(default=81) dating_sex", "出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11,", "birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum =", "= datetime.date.today() age = date.year - self.birth_year age = age if date.month >", "@property def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex,", "# if not token: # token = 'token........<PASSWORD>' # cache.set(key, token, 24 *", "\"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year", "@property def profile(self): if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile", "token: # token = 'token........<PASSWORD>' # cache.set(key, token, 24 * 60 * 60)", "hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return {", "# return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围", "* 60) # # return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance", "> self.birth_day else age-1 return age @property def profile(self): if not hasattr(self, '_profile'):", "= 'token:{}'.format(self.id) # # token = cache.get(key) # # if not token: #", "models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age", "min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location", "'上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\"", "# cache.set(key, token, 24 * 60 * 60) # # return token class", "date.day > self.birth_day else age-1 return age @property def profile(self): if not hasattr(self,", "最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location =", "else age-1 return age @property def profile(self): if not hasattr(self, '_profile'): self._profile, _", "\"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day 出生日", "max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0)", "( (0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS = ( ('bj', '北京'),", "phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year =", "models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play =", "datetime from django.db import models from libs.orm import ModelToDicMiXin SEXS = ( (0,", "def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return: # \"\"\" # key", "# token = cache.get(key) # # if not token: # token = 'token........<PASSWORD>'", "= 'token........<PASSWORD>' # cache.set(key, token, 24 * 60 * 60) # # return", "birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) 
nickname =", "= models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def", "to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater,", "('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex", "'location': self.location, 'age': self.age, } class Meta: db_table = 'users' # def get_or_create_token(self):", "max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz')", "视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance =", "最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\"", "= models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age = date.year - self.birth_year", "= models.IntegerField(default=0) max_distance = models.IntegerField(default=10) min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex =", "datetime.date.today() age = date.year - self.birth_year age = age if date.month > self.birth_month", "= models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date =", "\"\"\" # key = 'token:{}'.format(self.id) # # token = cache.get(key) # # if", "# # return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance", "60) # # return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围", "常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0)", "models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month", "# # token = cache.get(key) # # if not token: # token =", "return self._profile @property def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname,", "profile(self): if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def", "(1, '男'), (2, '女'), ) LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz',", "import models from libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'),", "'avater': self.avater, 'location': self.location, 'age': self.age, } class Meta: db_table = 'users' #", "LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'),", "import datetime from django.db import models from libs.orm import ModelToDicMiXin SEXS = (", "出生日 avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16)", "def profile(self): if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property", "昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象 location 常居地", "sex 性别 birth_year 出生年 birth_month 出生月 birth_day 
出生日 avatar 个人形象 location 常居地 \"\"\"", "= age if date.month > self.birth_month and date.day > self.birth_day else age-1 return", "{ 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location,", "def age(self): date = datetime.date.today() age = date.year - self.birth_year age = age", "class Meta: db_table = 'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token", "= models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True) class Meta: db_table =", "# token = 'token........<PASSWORD>' # cache.set(key, token, 24 * 60 * 60) #", "location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play", "'杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号", "models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True) class Meta: db_table = 'profiles'", "django.db import models from libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'), (1,", "auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance", "models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True) class Meta:", "(2, '女'), ) LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz',", "cache.set(key, token, 24 * 60 * 60) # # return token class Profile(models.Model,", "models from libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'), (2,", "birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location", "self._profile @property def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex':", "= ( (0, '未知'), (1, '男'), (2, '女'), ) LOCATIONS = ( ('bj',", "class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age", ") class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month", "_ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return { 'uid': self.id, 'phonenum':", "token class Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄", "# 为用户生成唯一的 token # :return: # \"\"\" # key = 'token:{}'.format(self.id) # #", "user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance = models.IntegerField(default=0) max_distance = models.IntegerField(default=10)", "not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return", "# \"\"\" # 为用户生成唯一的 token # :return: # \"\"\" # key = 'token:{}'.format(self.id)", "# :return: # \"\"\" # key = 'token:{}'.format(self.id) # # token = cache.get(key)", "Meta: db_table = 'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token #", "'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, }", 
"Profile(models.Model, ModelToDicMiXin): \"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄", "Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname':", "# def get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return: # \"\"\" #", "date.month > self.birth_month and date.day > self.birth_day else age-1 return age @property def", "= models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS, default=0) birth_year = models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day", "phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar", "age(self): date = datetime.date.today() age = date.year - self.birth_year age = age if", "手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象", "self.birth_day else age-1 return age @property def profile(self): if not hasattr(self, '_profile'): self._profile,", "from django.db import models from libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'),", "self.avater, 'location': self.location, 'age': self.age, } class Meta: db_table = 'users' # def", "为用户生成唯一的 token # :return: # \"\"\" # key = 'token:{}'.format(self.id) # # token", "key = 'token:{}'.format(self.id) # # token = cache.get(key) # # if not token:", "= cache.get(key) # # if not token: # token = 'token........<PASSWORD>' # cache.set(key,", "max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True) class Meta: db_table", "min_dating_age = models.IntegerField(default=18) max_dating_age = models.IntegerField(default=81) dating_sex = models.IntegerField(choices=SEXS, default=0) auto_play = models.BooleanField(default=True)", "= date.year - self.birth_year age = age if date.month > self.birth_month and date.day", "age = age if date.month > self.birth_month and date.day > self.birth_day else age-1", "nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day 出生日 avatar 个人形象 location", "get_or_create_token(self): # \"\"\" # 为用户生成唯一的 token # :return: # \"\"\" # key =", "('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model):", "def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum, 'nickname': self.nickname, 'sex': self.sex, 'avater':", "self.location, 'age': self.age, } class Meta: db_table = 'users' # def get_or_create_token(self): #", "最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance", "User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year 出生年 birth_month 出生月 birth_day", "age = date.year - self.birth_year age = age if date.month > self.birth_month and", "} class Meta: db_table = 'users' # def get_or_create_token(self): # \"\"\" # 为用户生成唯一的", "\"\"\" location 目标城市 min_distance 最小查找范围 max_distance 最大查找范围 min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别", "location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname = models.CharField(max_length=16) sex = models.IntegerField(choices=SEXS,", "self.age, } class Meta: db_table = 'users' # def get_or_create_token(self): # \"\"\" #", "token, 24 * 60 * 60) # # return token class Profile(models.Model, ModelToDicMiXin):", "@property def age(self): date = datetime.date.today() age = date.year - self.birth_year 
age =", "cache.get(key) # # if not token: # token = 'token........<PASSWORD>' # cache.set(key, token,", "age if date.month > self.birth_month and date.day > self.birth_day else age-1 return age", "age-1 return age @property def profile(self): if not hasattr(self, '_profile'): self._profile, _ =", "from libs.orm import ModelToDicMiXin SEXS = ( (0, '未知'), (1, '男'), (2, '女'),", "models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self):", "'男'), (2, '女'), ) LOCATIONS = ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'),", "= ( ('bj', '北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz',", "dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32, choices=LOCATIONS, default='gz') min_distance =", "models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age = date.year", "min_dating_age 最小交友年龄 max_dating_age 最大交友年龄 dating_sex 匹配的性别 auto_play 视频自动播放 user.profile.location \"\"\" location = models.CharField(max_length=32,", "= Profile.objects.get_or_create(pk=self.id) return self._profile @property def to_dic(self): return { 'uid': self.id, 'phonenum': self.phonenum,", "models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today() age = date.year - self.birth_year age", "* 60 * 60) # # return token class Profile(models.Model, ModelToDicMiXin): \"\"\" location", "> self.birth_month and date.day > self.birth_day else age-1 return age @property def profile(self):", "'深圳'), ('cd', '成都'), ('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称", "'北京'), ('sh', '上海'), ('hz', '杭州'), ('sz', '深圳'), ('cd', '成都'), ('gz', '广州'), ) class", "= models.IntegerField(default=2000) birth_month = models.IntegerField(default=1) birth_day = models.IntegerField(default=1) avater = models.CharField(max_length=256) location =", "return age @property def profile(self): if not hasattr(self, '_profile'): self._profile, _ = Profile.objects.get_or_create(pk=self.id)", "'nickname': self.nickname, 'sex': self.sex, 'avater': self.avater, 'location': self.location, 'age': self.age, } class Meta:", "models.IntegerField(default=1) avater = models.CharField(max_length=256) location = models.CharField(choices=LOCATIONS,max_length=32,default='gz') @property def age(self): date = datetime.date.today()", "('gz', '广州'), ) class User(models.Model): \"\"\" phonenum 手机号 nickname 昵称 sex 性别 birth_year", "出生月 birth_day 出生日 avatar 个人形象 location 常居地 \"\"\" phonenum = models.CharField(max_length=11, unique=True) nickname", "date.year - self.birth_year age = age if date.month > self.birth_month and date.day >" ]
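A brief usage sketch of these two models, for orientation only: it assumes a configured Django environment with the migrations for User and Profile applied, and the phone number and field values are made-up demo data.

# Hypothetical demo data; User and Profile are the models defined above.
user = User.objects.create(
    phonenum='13800000000',   # made-up number
    nickname='demo',
    sex=1,
    birth_year=1995,
    birth_month=6,
    birth_day=1,
)

# `user.profile` lazily get-or-creates the Profile row sharing the user's pk.
profile = user.profile
profile.location = 'bj'
profile.max_distance = 50
profile.save()

# `to_dic` returns a JSON-friendly summary, e.g. for an API response.
print(user.to_dic)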
[ "Check logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams =", ":] chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 =", "import os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt", "%s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise ValueError('Input model", "i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p =", "for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p", "assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/' + info_id load_seed", "data[:, 0] data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ',", "= protocol[:, 0] protocol = protocol[:, 1] # Control fitting seed # fit_seed", "Load data data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) # headers", "Load fitting results calloaddir = './out/' + info_id load_seed = 542811797 fit_idx =", "(savedir, saveas), *chains_param) # Plot # burn in and thinning chains_final = chains[:,", "parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data =", "info_id = 'model_%s' % which_model info = importlib.import_module(info_id) data_dir = './data' savedir =", "thinning chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5 *", "print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams))", "'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints", "np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) # headers times = data[:, 0]", "= np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior,", "import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info = importlib.import_module(info_id) data_dir =", "m import parametertransform import priors \"\"\" Run fit. 
\"\"\" model_list = ['A', 'B',", "data_file_name, delimiter=',', skiprows=1) # headers times = data[:, 0] data = data[:, 1]", "ValueError('Input model %s is not available in the model list' \\ % which_model)", "logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine priorparams = np.copy(info.base_param)", "noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC)", "del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot #", "info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature:", "at prior parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) #", "info_id + '-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times", "info_id load_seed = 542811797 fit_idx = [1, 2, 3] transform_x0_list = [] print('MCMC", "seed # fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed)", "try: which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if", "# Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data", "transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _ in", "n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run()", "= np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:,", "is working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma)", "headers times = data[:, 0] data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated", "np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for", "m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior =", "parameters chains_param = np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i,", "numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints import", "variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior = {", "mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter))", "= c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) 
pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param)", "results calloaddir = './out/' + info_id load_seed = 542811797 fit_idx = [1, 2,", "K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol", "0] protocol = protocol[:, 1] # Control fitting seed # fit_seed = np.random.randint(0,", "data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) # headers times =", "priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams,", "transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir,", "noise level: ', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param,", ":, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) #", "transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir,", "logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams,", "protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:, 1]", "# fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed)", "skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:, 1] # Control fitting", "np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams)", "# burn in and thinning chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param", "version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot # burn in and thinning", "savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting", "protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem)", "np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load", "data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma],", "prior parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load", "Run fit. 
\"\"\" model_list = ['A', 'B', 'C'] try: which_model = sys.argv[1] except:", "'./data' savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv'", "transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior)", "= transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas))", "= data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model", "logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma]) logprior", "int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False,", "variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info = importlib.import_module(info_id) data_dir", "% (savedir, saveas), *chains_param) # Plot # burn in and thinning chains_final =", "(savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() # De-transform parameters chains_param", "model list' \\ % which_model) # Get all input variables import importlib sys.path.append('./mmt-model-files')", "model_list: raise ValueError('Input model %s is not available in the model list' \\", "= info_id + '-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',')", "= np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) # headers times = data[:,", "* noise_sigma], [10. * noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior)", "\\ % which_model) # Get all input variables import importlib sys.path.append('./mmt-model-files') info_id =", "noise_sigma], [10. 
* noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) #", "print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise", "= pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior =", "fitting results calloaddir = './out/' + info_id load_seed = 542811797 fit_idx = [1,", "+ data_file_name, delimiter=',', skiprows=1) # headers times = data[:, 0] data = data[:,", "mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() # De-transform", "print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param", "= data[:, 0] data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level:", "Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior =", "saveas)) chains = mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for i, c", "pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv'", "import pints.io import pints.plot import model as m import parametertransform import priors \"\"\"", "model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K )", ":-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) # Save", "= chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5, :]", "= 542811797 fit_idx = [1, 2, 3] transform_x0_list = [] print('MCMC starting point:", "os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise ValueError('Input model %s is not", "chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]),", "except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in model_list:", "+ info.temperature, # K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, }", "n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0", "', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 +", "{ 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create", "= np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma)", "fit. 
\"\"\" model_list = ['A', 'B', 'C'] try: which_model = sys.argv[1] except: print('Usage:", "= np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1]", "} # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model,", "= 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas = info_id +", "= np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters,", "'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas = info_id + '-'", "sys sys.path.append('./method') import os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot", "ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0) plt.savefig('%s/%s-fig2.png' % (savedir, saveas)) plt.close('all')", "= np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams))", "info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv',", "print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas = info_id + '-' +", "len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' %", "delimiter=',', skiprows=1) # headers times = data[:, 0] data = data[:, 1] noise_sigma", "np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list),", "chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5,", "chains = mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for i, c in", "np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0) plt.savefig('%s/%s-fig2.png'", "'B', 'C'] try: which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__))", "in and thinning chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:,", "%s is not available in the model list' \\ % which_model) # Get", "fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) #", "i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc", "% (savedir, saveas)) chains = mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for", "import priors \"\"\" Run fit. 
\"\"\" model_list = ['A', 'B', 'C'] try: which_model", "set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior = { 'model_A': priors.ModelALogPrior,", "protocol = protocol[:, 1] # Control fitting seed # fit_seed = np.random.randint(0, 2**30)", "to ', data_file_name) print('Temperature: ', info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4]", "python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise ValueError('Input", "pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1", "+ '-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times =", "542811797 fit_idx = [1, 2, 3] transform_x0_list = [] print('MCMC starting point: ')", "loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10.", "import sys sys.path.append('./method') import os import numpy as np import matplotlib matplotlib.use('Agg') import", "logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir", "as plt import pints import pints.io import pints.plot import model as m import", "fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p),", "n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv'", "= transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior", "# Load fitting results calloaddir = './out/' + info_id load_seed = 542811797 fit_idx", "% (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() # De-transform parameters", "np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior", "from __future__ import print_function import sys sys.path.append('./method') import os import numpy as np", "i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1])", "* n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains =", "protocol[:, 0] protocol = protocol[:, 1] # Control fitting seed # fit_seed =", "# Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model, times,", "available in the model list' \\ % which_model) # Get all input variables", "if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ',", "matplotlib.use('Agg') import 
matplotlib.pyplot as plt import pints import pints.io import pints.plot import model", "the model list' \\ % which_model) # Get all input variables import importlib", "as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints import pints.io", "importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir)", "load_seed = 542811797 fit_idx = [1, 2, 3] transform_x0_list = [] print('MCMC starting", "+ info_id load_seed = 542811797 fit_idx = [1, 2, 3] transform_x0_list = []", "\"\"\" model_list = ['A', 'B', 'C'] try: which_model = sys.argv[1] except: print('Usage: python", "= pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior =", "logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter)", "for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir =", "model_list = ['A', 'B', 'C'] try: which_model = sys.argv[1] except: print('Usage: python %s", "plt import pints import pints.io import pints.plot import model as m import parametertransform", "which_model not in model_list: raise ValueError('Input model %s is not available in the", "data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol", "print('Estimated noise level: ', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc,", "info.temperature, # K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } #", "LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times)", "= 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param =", "= pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams", "starting point: ') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave',", "= './out/' + info_id load_seed = 542811797 fit_idx = [1, 2, 3] transform_x0_list", "which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model", "as m import parametertransform import priors \"\"\" Run fit. 
\"\"\" model_list = ['A',", "1] # Control fitting seed # fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797", "'%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior:", "'./out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ',", "logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/' + info_id load_seed = 542811797", "= 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' %", "p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc =", "= np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:, 1] #", "# Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior", "mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() #", "-1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas),", "priors \"\"\" Run fit. \"\"\" model_list = ['A', 'B', 'C'] try: which_model =", "os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas", "+ '/' + data_file_name, delimiter=',', skiprows=1) # headers times = data[:, 0] data", "[1, 2, 3] transform_x0_list = [] print('MCMC starting point: ') for i in", "LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. 
* noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior,", "= m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior", ") LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol,", "import matplotlib.pyplot as plt import pints import pints.io import pints.plot import model as", "*chains_param) # Plot # burn in and thinning chains_final = chains[:, int(0.5 *", ":, -1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir,", "matplotlib.pyplot as plt import pints import pints.io import pints.plot import model as m", "2, 3] transform_x0_list = [] print('MCMC starting point: ') for i in fit_idx:", "= protocol[:, 1] # Control fitting seed # fit_seed = np.random.randint(0, 2**30) fit_seed", "', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results", "print('MCMC starting point: ') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir,", "# Check logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams", "current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior = { 'model_A':", "saveas = info_id + '-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1,", "Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param,", "pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0) plt.savefig('%s/%s-fig2.png' % (savedir,", "= pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. 
*", "# Plot # burn in and thinning chains_final = chains[:, int(0.5 * n_iter)::5,", "'./out/' + info_id load_seed = 542811797 fit_idx = [1, 2, 3] transform_x0_list =", "priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ',", "= './data' savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name =", "which_model info = importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' + info_id if", "stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param)", "print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter", "transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed version)", "c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) #", "[] print('MCMC starting point: ') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' %", "matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints import pints.io import pints.plot import", "transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at", "= np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0)", "kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0) plt.savefig('%s/%s-fig2.png' % (savedir, saveas))", "Get all input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info", "import parametertransform import priors \"\"\" Run fit. \"\"\" model_list = ['A', 'B', 'C']", "noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _", "* n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0]", "data data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) # headers times", "= mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for i, c in enumerate(chains):", "in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f)", "pints.io import pints.plot import model as m import parametertransform import priors \"\"\" Run", "pints.UniformLogPrior([0.1 * noise_sigma], [10. 
* noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood,", "parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting", "= './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to", "c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i,", "in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :,", "= parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',',", "np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:, 1] # Control", "Plot # burn in and thinning chains_final = chains[:, int(0.5 * n_iter)::5, :]", "', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param", "(savedir, saveas)) chains = mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for i,", "burn in and thinning chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param =", "import model as m import parametertransform import priors \"\"\" Run fit. \"\"\" model_list", "n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png'", "# Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05", "', info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4] # Protocol protocol =", "for i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:,", "x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param,", "os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import", "raise ValueError('Input model %s is not available in the model list' \\ %", "os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas =", "temperature=273.15 + info.temperature, # K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior,", "# Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol =", "mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains", "times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 *", "model.set_fixed_form_voltage_protocol(protocol, 
protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood =", "# Get all input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model", "Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, # K", "= { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) #", "working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams", "transform_x0_list = [] print('MCMC starting point: ') for i in fit_idx: f =", "pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma])", "1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model model =", "noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is", "= '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1])", "# De-transform parameters chains_param = np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp =", "+ info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name)", "input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info = importlib.import_module(info_id)", "542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param", "pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot # burn in and thinning chains_final", "calloaddir = './out/' + info_id load_seed = 542811797 fit_idx = [1, 2, 3]", "not available in the model list' \\ % which_model) # Get all input", "# Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot # burn", "noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature,", "sys.path.append('./method') import os import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as", "info = importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' + info_id if not", "==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/' + info_id load_seed =", "enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1]", "= parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir + '/'", "__future__ import print_function import sys sys.path.append('./method') import os import numpy as np import", "protocol[:, 1] # Control fitting seed # fit_seed = np.random.randint(0, 
2**30) fit_seed =", "Control fitting seed # fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed:", "Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 *", "De-transform parameters chains_param = np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp = np.copy(c)", "Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data)", "# Load data data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1) #", "mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape)", "fitting seed # fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ',", "', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000", "np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list,", "importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info = importlib.import_module(info_id) data_dir = './data'", "3] transform_x0_list = [] print('MCMC starting point: ') for i in fit_idx: f", "transform=transform_to_model_param, temperature=273.15 + info.temperature, # K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B':", "chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' %", "data_file_name) print('Temperature: ', info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4] # Protocol", "= transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed", "#!/usr/bin/env python3 from __future__ import print_function import sys sys.path.append('./method') import os import numpy", "np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter", "noise_sigma = np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model model = m.Model(info.model_file,", "saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas)) chains = mcmc.run() # De-transform parameters chains_param =", "= np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _ in range(10):", "') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i)", "['A', 'B', 'C'] try: which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]' %", "import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints import pints.io import pints.plot", "list' \\ % which_model) # Get all input variables import importlib sys.path.append('./mmt-model-files') info_id", "pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams =", "'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem", "fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams) priorparams = 
np.append(priorparams, noise_sigma) transform_priorparams =", "fit_idx = [1, 2, 3] transform_x0_list = [] print('MCMC starting point: ') for", "Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot # burn in", "'model_%s' % which_model info = importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' +", "times = data[:, 0] data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise", "data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ', noise_sigma) # Model model", "(de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot # burn in and", "if which_model not in model_list: raise ValueError('Input model %s is not available in", "data_dir = './data' savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir): os.makedirs(savedir) data_file_name", "parametertransform import priors \"\"\" Run fit. \"\"\" model_list = ['A', 'B', 'C'] try:", "c_tmp = np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] =", "skiprows=1) # headers times = data[:, 0] data = data[:, 1] noise_sigma =", "lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior", "logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working", "model as m import parametertransform import priors \"\"\" Run fit. \"\"\" model_list =", "# Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15 + info.temperature, #", "and thinning chains_final = chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5", "transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all') pints.plot.trace(chains_param, ref_parameters=x0) plt.savefig('%s/%s-fig2.png' %", "seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param =", "# K ) LogPrior = { 'model_A': priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update", ":] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' %", "transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir +", "f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma))", "method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas))", "Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data", "data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ', 
noise_sigma) #", "[str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise ValueError('Input model %s", "problem = pints.SingleOutputProblem(model, times, data) loglikelihood = pints.GaussianLogLikelihood(problem) logmodelprior = LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior", "'C'] try: which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit()", "_ in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/'", "fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param", "= ['A', 'B', 'C'] try: which_model = sys.argv[1] except: print('Usage: python %s [str:which_model]'", "int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 =", "[10. * noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check", "in the model list' \\ % which_model) # Get all input variables import", "sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info = importlib.import_module(info_id) data_dir = './data' savedir", "= importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' + info_id if not os.path.isdir(savedir):", "not in model_list: raise ValueError('Input model %s is not available in the model", "in model_list: raise ValueError('Input model %s is not available in the model list'", "'/' + data_file_name, delimiter=',', skiprows=1) # headers times = data[:, 0] data =", "import pints.plot import model as m import parametertransform import priors \"\"\" Run fit.", "', data_file_name) print('Temperature: ', info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4] #", "protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem = pints.SingleOutputProblem(model, times, data) loglikelihood", "in range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/' +", "= 'model_%s' % which_model info = importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-'", "= [] print('MCMC starting point: ') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt'", "print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter =", "data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature) saveas = info_id", "pints import pints.io import pints.plot import model as m import parametertransform import priors", "% os.path.basename(__file__)) sys.exit() if which_model not in model_list: raise ValueError('Input model %s is", "0] data = data[:, 1] noise_sigma = np.std(data[:500]) print('Estimated noise level: ', noise_sigma)", "mcmc.run() # De-transform parameters chains_param = np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp", "= chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1])", "not os.path.isdir(savedir): os.makedirs(savedir) data_file_name = 'data-sinewave.csv' print('Fitting to ', data_file_name) print('Temperature: ', info.temperature)", 
"lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine priorparams =", "transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0) plt.savefig('%s/%s-fig1.png' % (savedir, saveas)) plt.close('all')", "= pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list, method=pints.PopulationMCMC) n_iter = 100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False)", "= LogPrior[info_id](transform_to_model_param, transform_from_model_param) lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma]) logprior =", "* noise_sigma]) logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior", "logprior) # Check logposterior is working fine priorparams = np.copy(info.base_param) transform_priorparams = transform_from_model_param(priorparams)", "chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp)", "% which_model) # Get all input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s'", "model %s is not available in the model list' \\ % which_model) #", "% which_model info = importlib.import_module(info_id) data_dir = './data' savedir = './out/mcmc-' + info_id", "'-' + data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:,", "parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir + '/' + data_file_name, delimiter=',', skiprows=1)", "np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :, :-1] =", "sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not in", "+ data_file_name[5:][:-4] # Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0]", "% (calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ',", "transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir + '/' + data_file_name,", "chains[:, int(0.5 * n_iter)::5, :] chains_param = chains_param[:, int(0.5 * n_iter)::5, :] transform_x0", "protocol_times = protocol[:, 0] protocol = protocol[:, 1] # Control fitting seed #", ":-1]) chains_param[i, :, -1] = c_tmp[:, -1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv'", "transform_from_model_param(priorparams) priorparams = np.append(priorparams, noise_sigma) transform_priorparams = np.append(transform_priorparams, noise_sigma) print('Posterior at prior parameters:", "(calloaddir, 'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1]))", "= np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set", "range(10): assert(logposterior(transform_priorparams) ==\\ logposterior(transform_priorparams)) # Load fitting results calloaddir = './out/' + info_id", 
"saveas), *chains_param) # Plot # burn in and thinning chains_final = chains[:, int(0.5", "delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:, 1] # Control fitting seed", "priors.ModelALogPrior, 'model_B': priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs", "Protocol protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1, delimiter=',') protocol_times = protocol[:, 0] protocol = protocol[:,", "pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine priorparams", "import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints", "= pints.ComposedLogPrior(logmodelprior, lognoiseprior) logposterior = pints.LogPosterior(loglikelihood, logprior) # Check logposterior is working fine", "transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run mcmc = pints.MCMCController(logposterior, len(transform_x0_list), transform_x0_list,", "chains_param = np.zeros(chains.shape) for i, c in enumerate(chains): c_tmp = np.copy(c) chains_param[i, :,", "import pints import pints.io import pints.plot import model as m import parametertransform import", "np.copy(c) chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1]) chains_param[i, :, -1] = c_tmp[:, -1]", "# Control fitting seed # fit_seed = np.random.randint(0, 2**30) fit_seed = 542811797 print('Fit", "import print_function import sys sys.path.append('./method') import os import numpy as np import matplotlib", "which_model) # Get all input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' %", "-1] del(c_tmp) # Save (de-transformed version) pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param) # Plot", "point: ') for i in fit_idx: f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed,", "mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas))", "parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir + '/' +", "level: ', noise_sigma) # Model model = m.Model(info.model_file, variables=info.parameters, current_readout=info.current_list, set_ion=info.ions_conc, transform=transform_to_model_param, temperature=273.15", "= sys.argv[1] except: print('Usage: python %s [str:which_model]' % os.path.basename(__file__)) sys.exit() if which_model not", "pints.plot import model as m import parametertransform import priors \"\"\" Run fit. 
\"\"\"", "# headers times = data[:, 0] data = data[:, 1] noise_sigma = np.std(data[:500])", "print_function import sys sys.path.append('./method') import os import numpy as np import matplotlib matplotlib.use('Agg')", "chains_param[:, int(0.5 * n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0],", "np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pints import pints.io import", "noise_sigma) print('Posterior at prior parameters: ', logposterior(transform_priorparams)) for _ in range(10): assert(logposterior(transform_priorparams) ==\\", "= [1, 2, 3] transform_x0_list = [] print('MCMC starting point: ') for i", "* n_iter)::5, :] transform_x0 = transform_x0_list[0] x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1]) pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0)", "\"\"\" Run fit. \"\"\" model_list = ['A', 'B', 'C'] try: which_model = sys.argv[1]", "priors.ModelBLogPrior, } # Update protocol model.set_fixed_form_voltage_protocol(protocol, protocol_times) # Create Pints stuffs problem =", "is not available in the model list' \\ % which_model) # Get all", "fit_seed) np.random.seed(fit_seed) # Set parameter transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param #", "load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) # Run", "2**30) fit_seed = 542811797 print('Fit seed: ', fit_seed) np.random.seed(fit_seed) # Set parameter transformation", "all input variables import importlib sys.path.append('./mmt-model-files') info_id = 'model_%s' % which_model info =", "print('Temperature: ', info.temperature) saveas = info_id + '-' + data_file_name[5:][:-4] # Protocol protocol", "100000 mcmc.set_max_iterations(n_iter) mcmc.set_initial_phase_iterations(int(0.05 * n_iter)) mcmc.set_parallel(False) mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas)) mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir,", "python3 from __future__ import print_function import sys sys.path.append('./method') import os import numpy as", "'sinewave', load_seed, i) p = np.loadtxt(f) transform_x0_list.append(np.append(transform_from_model_param(p), noise_sigma)) print(transform_x0_list[-1]) print('Posterior: ', logposterior(transform_x0_list[-1])) #", "sys.exit() if which_model not in model_list: raise ValueError('Input model %s is not available", "transformation transform_to_model_param = parametertransform.log_transform_to_model_param transform_from_model_param = parametertransform.log_transform_from_model_param # Load data data = np.loadtxt(data_dir" ]
<filename>myriad/game/shell/grammar.py
import random

from pyparsing import alphas, empty, oneOf, replaceWith
from pyparsing import CaselessLiteral, OneOrMore, Optional, ParseException
from pyparsing import CaselessKeyword, LineEnd, MatchFirst, Word

from myriad.game.shell import command
from myriad.item import Item


class ShellParseException(ParseException):
    pass


class ShellParser(object):

    def __init__(self, session=None):
        self.session = session
        self.bnf = self.makeBNF()

    def makeCommandParseAction(self, cls):
        def cmdParseAction(s, l, tokens):
            return cls(tokens)
        return cmdParseAction

    def makeBNF(self):
        makeCmd = lambda s: MatchFirst(map(CaselessKeyword, s.split()))
        invVerb = makeCmd("INV INVENTORY I")
        mapVerb = makeCmd("MAP M")
        dropVerb = makeCmd("DROP LEAVE")
        takeVerb = makeCmd("TAKE PICKUP") | \
            (CaselessLiteral("PICK") + CaselessLiteral("UP"))
        moveVerb = makeCmd("MOVE GO") | empty
        useVerb = makeCmd("USE U")
        openVerb = makeCmd("OPEN O")
        quitVerb = makeCmd("QUIT Q")
        lookVerb = makeCmd("LOOK L")
        doorsVerb = CaselessKeyword("DOORS")
        helpVerb = makeCmd("H HELP ?")
        readVerb = CaselessKeyword("READ")

        itemRef = OneOrMore(Word(alphas)).setParseAction(self.validateItemName)

        makeDir = lambda s: makeCmd(s).setParseAction(replaceWith(s.split()[0]))
        nDir = makeDir("N NORTH")
        sDir = makeDir("S SOUTH")
        eDir = makeDir("E EAST")
        wDir = makeDir("W WEST")
        neDir = makeDir("NE NORTHEAST")
        seDir = makeDir("SE SOUTHEAST")
        swDir = makeDir("SW SOUTHWEST")
        nwDir = makeDir("NW NORTHWEST")
        moveDirection = nDir | sDir | eDir | wDir | neDir | seDir | swDir \
            | nwDir

        invCommand = invVerb
        mapCommand = mapVerb
        dropCommand = dropVerb + itemRef("item")
        takeCommand = takeVerb + itemRef("item")
        useCommand = useVerb + itemRef("usedObj") + \
            Optional(oneOf("IN ON", caseless=True)) + \
            Optional(itemRef, default=None)("targetObj")
        openCommand = openVerb + itemRef("item")
        moveCommand = moveVerb + moveDirection("direction")
        quitCommand = quitVerb
        lookCommand = lookVerb
        doorsCommand = doorsVerb
        helpCommand = helpVerb
        readCommand = readVerb + itemRef("subjectObj")

        invCommand.setParseAction(command.InventoryCommand)
        mapCommand.setParseAction(command.MapCommand)
        dropCommand.setParseAction(command.DropCommand)
        takeCommand.setParseAction(command.TakeCommand)
        useCommand.setParseAction(command.UseCommand)
        openCommand.setParseAction(command.OpenCommand)
        moveCommand.setParseAction(command.MoveCommand)
        quitCommand.setParseAction(command.QuitCommand)
        lookCommand.setParseAction(command.LookCommand)
        doorsCommand.setParseAction(command.DoorsCommand)
        helpCommand.setParseAction(command.HelpCommand)
        readCommand.setParseAction(command.ReadCommand)

        return (invCommand | mapCommand | useCommand | openCommand
                | dropCommand | takeCommand | moveCommand | lookCommand
                | doorsCommand | helpCommand | quitCommand
                | readCommand).setResultsName("command") + LineEnd()

    def validateItemName(self, s, l, t):
        iname = " ".join(t)
        if iname not in Item.items:
            raise ShellParseException(s, l, "No such item '%s'." % iname)
        return iname

    def parseCmd(self, cmdstr):
        try:
            return self.bnf.parseString(cmdstr)
        except ShellParseException, parseError:
            print "ShellParseException: %s" % parseError.msg
        except ParseException, parseError:
            return random.choice(["Sorry, I don't understand that.",
                                  "Say what?",
                                  "Whatchyoo talkin' 'bout, Willis?",
                                  "Huh?",
                                  "Garbage in, garbage out. Try again.",
                                  "What was the middle part again?",
                                  "Excuse me?",
                                  "Wtf?",
                                  "Uh... what?"])
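# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the pyparsing pattern used by the grammar
# above: case-insensitive keyword alternatives for a verb plus a multi-word item
# reference, each tagged with a results name. The verb list and the sample input
# are illustrative only and are not taken from the game's real vocabulary.
from pyparsing import CaselessKeyword, Group, MatchFirst, OneOrMore, Word, alphas

def make_cmd(words):
    # one case-insensitive alternative per whitespace-separated keyword
    return MatchFirst([CaselessKeyword(w) for w in words.split()])

take_verb = make_cmd("TAKE PICKUP")
item_ref = Group(OneOrMore(Word(alphas)))
take_command = take_verb("verb") + item_ref("item")

result = take_command.parseString("take rusty key")
print(result["verb"], list(result["item"]))    # roughly: TAKE ['rusty', 'key']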
from bets.model.stats.constants import RANKS, OUTCOMES
from bets.model.stats.abstract_stats import AbstractStats


class RatioStats(AbstractStats):
    KEYS = ["date", "country", "tournament", "host_team", "guest_team",
            "ratio_1", "ratio_X", "ratio_2",
            "rank_1", "rank_X", "rank_2",
            "ratio_min", "ratio_med", "ratio_max",
            "outcome_min", "outcome_med", "outcome_max",
            "ratio_perc_1_X", "ratio_perc_X_2", "ratio_perc_1_2",
            "ratio_perc_min_med", "ratio_perc_med_max", "ratio_perc_min_max",
            "ratio_mean", "ratio_geometric_mean", "ratio_perc_mean_geometric_mean"]

    def __init__(self, ratio_1, ratio_X, ratio_2, host_team="", guest_team="",
                 date="", country="", tournament=""):
        self.host_team = host_team
        self.guest_team = guest_team
        self.date = date
        self.country = country
        self.tournament = tournament

        self.ratio_1 = round(float(ratio_1), 2)
        self.ratio_X = round(float(ratio_X), 2)
        self.ratio_2 = round(float(ratio_2), 2)
        self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2)
        self.ratios_sorted = tuple(sorted(self.ratios))
        self.ratio_min = self.ratios_sorted[0]
        self.ratio_med = self.ratios_sorted[1]
        self.ratio_max = self.ratios_sorted[2]

        outcomes_by_rank = {rank: [] for rank in RANKS}
        ranks_by_outcome = {outcome: [] for outcome in OUTCOMES}
        for outcome in OUTCOMES:
            for rank in RANKS:
                if self[f"ratio_{outcome}"] == self[f"ratio_{rank}"]:
                    outcomes_by_rank[rank].append(outcome)
                    ranks_by_outcome[outcome].append(rank)

        self.rank_1 = "/".join(ranks_by_outcome["1"])
        self.rank_X = "/".join(ranks_by_outcome["X"])
        self.rank_2 = "/".join(ranks_by_outcome["2"])
        self.outcome_min = "/".join(outcomes_by_rank["min"])
        self.outcome_med = "/".join(outcomes_by_rank["med"])
        self.outcome_max = "/".join(outcomes_by_rank["max"])

        self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100), 2)
        self.ratio_perc_X_2 = round(((self.ratio_X / self.ratio_2) * 100), 2)
        self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2)
        self.ratio_perc_min_med = round(((self.ratio_min / self.ratio_med) * 100), 2)
        self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2)
        self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2)
        self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2)
        self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2)
        self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2)

    def is_having_similar_ratios_to(self, other: "RatioStats", delta=0.05) -> bool:
        if isinstance(other, RatioStats):
            if abs(self.ratio_1 - other.ratio_1) <= delta:
                if abs(self.ratio_X - other.ratio_X) <= delta:
                    if abs(self.ratio_2 - other.ratio_2) <= delta:
                        return True
        return False

    def is_having_similar_outcome_ratio_percentages_to(self, other: "RatioStats", delta=0.05) -> bool:
        if isinstance(other, RatioStats):
            if abs(self.ratio_perc_1_X - other.ratio_perc_1_X) <= delta:
                if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta:
                    if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta:
                        return True
        return False

    def is_having_similar_rank_ratio_percentages_to(self, other: "RatioStats", delta=0.05) -> bool:
        if isinstance(other, RatioStats):
            if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta:
                if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta:
                    if abs(self.ratio_perc_min_max - other.ratio_perc_min_max) <= delta:
                        return True
"other.ratio_perc_med_max) <= delta: if abs(self.ratio_perc_min_max - other.ratio_perc_min_max) <= delta: return True return False", "guest_team self.date = date self.country = country self.tournament = tournament self.ratio_1 = round(float(ratio_1),", "RANKS} ranks_by_outcome = {outcome: [] for outcome in OUTCOMES} for outcome in OUTCOMES:", "* 100), 2) self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2) self.ratio_perc_min_med =", "100), 2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2) self.ratio_geometric_mean", "bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_1_X - other.ratio_perc_1_X): if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <=", "100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2) self.ratio_mean = round(((self.ratio_1", "if isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max)", "outcome in OUTCOMES} for outcome in OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"]", "return False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if", "\"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100), 2) self.ratio_perc_X_2", "delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta: return True return False def is_having_similar_rank_ratio_percentages_to(self,", "if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta: if", "abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta: if abs(self.ratio_perc_min_max", "self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0] self.ratio_med = self.ratios_sorted[1] self.ratio_max = self.ratios_sorted[2] outcomes_by_rank", "tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0] self.ratio_med = self.ratios_sorted[1] self.ratio_max = self.ratios_sorted[2] outcomes_by_rank = {rank:", "\"ratio_perc_min_max\", \"ratio_mean\", \"ratio_geometric_mean\", \"ratio_perc_mean_geometric_mean\"] def __init__(self, ratio_1, ratio_X, ratio_2, host_team=\"\", guest_team=\"\", date=\"\", country=\"\",", "outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min =", "/ self.ratio_2) * 100), 2) self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2)", "def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_1_X -", "- other.ratio_1) <= delta: if abs(self.ratio_X - other.ratio_X) <= delta: if abs(self.ratio_2 -", "delta: if abs(self.ratio_X - other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2) <= delta:", "self.ratios_sorted[2] outcomes_by_rank = {rank: [] for rank in RANKS} ranks_by_outcome = {outcome: []", "\"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\", \"ratio_perc_med_max\", \"ratio_perc_min_max\", \"ratio_mean\", \"ratio_geometric_mean\", 
\"ratio_perc_mean_geometric_mean\"] def", "tournament self.ratio_1 = round(float(ratio_1), 2) self.ratio_X = round(float(ratio_X), 2) self.ratio_2 = round(float(ratio_2), 2)", "if isinstance(other, RatioStats): if abs(self.ratio_1 - other.ratio_1) <= delta: if abs(self.ratio_X - other.ratio_X)", "- other.ratio_perc_med_max) <= delta: if abs(self.ratio_perc_min_max - other.ratio_perc_min_max) <= delta: return True return", "\"host_team\", \"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\",", "in RANKS} ranks_by_outcome = {outcome: [] for outcome in OUTCOMES} for outcome in", "self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2) self.ratio_mean", "delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if", "KEYS = [\"date\", \"country\", \"tournament\", \"host_team\", \"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\",", "host_team=\"\", guest_team=\"\", date=\"\", country=\"\", tournament=\"\"): self.host_team = host_team self.guest_team = guest_team self.date =", "2) self.ratio_X = round(float(ratio_X), 2) self.ratio_2 = round(float(ratio_2), 2) self.ratios = (self.ratio_1, self.ratio_X,", "self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max)", "\"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\", \"ratio_perc_med_max\", \"ratio_perc_min_max\", \"ratio_mean\", \"ratio_geometric_mean\",", "- other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta: if abs(self.ratio_perc_min_max -", "2) self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2)", "- other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return True return", "round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2) self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X", "100), 2) self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2) self.ratio_perc_min_med = round(((self.ratio_min", "\"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\",", "ratio_1, ratio_X, ratio_2, host_team=\"\", guest_team=\"\", date=\"\", country=\"\", tournament=\"\"): self.host_team = host_team self.guest_team =", "for rank in RANKS} ranks_by_outcome = {outcome: [] for outcome in OUTCOMES} for", "abs(self.ratio_X - other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return True", "date self.country = country self.tournament = tournament self.ratio_1 = round(float(ratio_1), 2) self.ratio_X =", "self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean", "2) self.ratio_perc_X_2 = round(((self.ratio_X / self.ratio_2) * 100), 2) self.ratio_perc_1_2 = round(((self.ratio_1 /", "other.ratio_1) <= delta: if abs(self.ratio_X - other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2)", "abs(self.ratio_2 - other.ratio_2) <= delta: return True return False def 
is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\",", "other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return True return False", "= (self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0] self.ratio_med = self.ratios_sorted[1]", "\"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min = \"/\".join(outcomes_by_rank[\"min\"]) self.outcome_med = \"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1", "* 100), 2) self.ratio_perc_X_2 = round(((self.ratio_X / self.ratio_2) * 100), 2) self.ratio_perc_1_2 =", "other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_1 - other.ratio_1) <=", "\"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_1_X - other.ratio_perc_1_X): if abs(self.ratio_perc_X_2", "2) self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0] self.ratio_med", "self.ratio_2 = round(float(ratio_2), 2) self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min", "outcomes_by_rank = {rank: [] for rank in RANKS} ranks_by_outcome = {outcome: [] for", "2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2) self.ratio_geometric_mean =", "<= delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool:", "= \"/\".join(outcomes_by_rank[\"min\"]) self.outcome_med = \"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X)", "/ self.ratio_2) * 100), 2) self.ratio_perc_min_med = round(((self.ratio_min / self.ratio_med) * 100), 2)", "self.ratio_X + self.ratio_2) / 3), 2) self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2)", "= self.ratios_sorted[0] self.ratio_med = self.ratios_sorted[1] self.ratio_max = self.ratios_sorted[2] outcomes_by_rank = {rank: [] for", "abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta: return True", "OUTCOMES} for outcome in OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]:", "RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"])", "self.ratio_perc_X_2 = round(((self.ratio_X / self.ratio_2) * 100), 2) self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2)", "OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 =", "\"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\",", "[\"date\", \"country\", \"tournament\", \"host_team\", \"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\",", "= \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100), 2) self.ratio_perc_X_2 
= round(((self.ratio_X", "ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min = \"/\".join(outcomes_by_rank[\"min\"])", "bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max -", "other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <=", "self.ratio_2) ** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100),", "True return False def is_having_similar_rank_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats):", "isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <=", "self.date = date self.country = country self.tournament = tournament self.ratio_1 = round(float(ratio_1), 2)", "self.ratios_sorted[1] self.ratio_max = self.ratios_sorted[2] outcomes_by_rank = {rank: [] for rank in RANKS} ranks_by_outcome", "round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100),", "100), 2) self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min", "ratio_2, host_team=\"\", guest_team=\"\", date=\"\", country=\"\", tournament=\"\"): self.host_team = host_team self.guest_team = guest_team self.date", "2) self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min /", "is_having_similar_rank_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med)", "def __init__(self, ratio_1, ratio_X, ratio_2, host_team=\"\", guest_team=\"\", date=\"\", country=\"\", tournament=\"\"): self.host_team = host_team", "self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min = \"/\".join(outcomes_by_rank[\"min\"]) self.outcome_med = \"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max", "= round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) *", "/ 3), 2) self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 /", "is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_1_X - other.ratio_perc_1_X):", "(1 / 3)), 2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2) def", "delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self,", "if abs(self.ratio_2 - other.ratio_2) <= delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self, other:", "= \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min = \"/\".join(outcomes_by_rank[\"min\"]) self.outcome_med =", "False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) -> bool: if isinstance(other, 
RatioStats): if abs(self.ratio_perc_1_X", "isinstance(other, RatioStats): if abs(self.ratio_perc_1_X - other.ratio_perc_1_X): if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta: if", "* self.ratio_X * self.ratio_2) ** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean /", "\"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\",", "self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2) def is_having_similar_ratios_to(self, other: \"RatioStats\", delta=0.05)", "round(float(ratio_2), 2) self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0]", "country=\"\", tournament=\"\"): self.host_team = host_team self.guest_team = guest_team self.date = date self.country =", "= round(float(ratio_1), 2) self.ratio_X = round(float(ratio_X), 2) self.ratio_2 = round(float(ratio_2), 2) self.ratios =", "(self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted = tuple(sorted(self.ratios)) self.ratio_min = self.ratios_sorted[0] self.ratio_med = self.ratios_sorted[1] self.ratio_max", "other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta: if abs(self.ratio_perc_min_max - other.ratio_perc_min_max)", "RatioStats): if abs(self.ratio_1 - other.ratio_1) <= delta: if abs(self.ratio_X - other.ratio_X) <= delta:", "self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X", "RANKS, OUTCOMES from bets.model.stats.abstract_stats import AbstractStats class RatioStats(AbstractStats): KEYS = [\"date\", \"country\", \"tournament\",", "self.ratio_med) * 100), 2) self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2) self.ratio_perc_min_max", "other.ratio_perc_X_2) <= delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta: return True return False", "self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100), 2) self.ratio_perc_X_2 =", "<= delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return True return False def", "rank in RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X", "import RANKS, OUTCOMES from bets.model.stats.abstract_stats import AbstractStats class RatioStats(AbstractStats): KEYS = [\"date\", \"country\",", "= date self.country = country self.tournament = tournament self.ratio_1 = round(float(ratio_1), 2) self.ratio_X", "if abs(self.ratio_X - other.ratio_X) <= delta: if abs(self.ratio_2 - other.ratio_2) <= delta: return", "abs(self.ratio_perc_1_X - other.ratio_perc_1_X): if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2)", "<= delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta: return True return False def", "\"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\", \"ratio_perc_med_max\", \"ratio_perc_min_max\", \"ratio_mean\", \"ratio_geometric_mean\", \"ratio_perc_mean_geometric_mean\"] def __init__(self, ratio_1, ratio_X,", "rank in RANKS} ranks_by_outcome = {outcome: [] for outcome in OUTCOMES} for outcome", "/ self.ratio_max) * 
100), 2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) /", "\"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"]) self.outcome_min = \"/\".join(outcomes_by_rank[\"min\"]) self.outcome_med = \"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"])", "if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta: if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta: return", "self.tournament = tournament self.ratio_1 = round(float(ratio_1), 2) self.ratio_X = round(float(ratio_X), 2) self.ratio_2 =", "if isinstance(other, RatioStats): if abs(self.ratio_perc_1_X - other.ratio_perc_1_X): if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta:", "from bets.model.stats.abstract_stats import AbstractStats class RatioStats(AbstractStats): KEYS = [\"date\", \"country\", \"tournament\", \"host_team\", \"guest_team\",", "in OUTCOMES} for outcome in OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"] ==", "self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2) self.ratio_perc_min_med = round(((self.ratio_min / self.ratio_med)", "self.ratio_max) * 100), 2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3),", "\"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\", \"ratio_perc_med_max\",", "guest_team=\"\", date=\"\", country=\"\", tournament=\"\"): self.host_team = host_team self.guest_team = guest_team self.date = date", "== self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2 = \"/\".join(ranks_by_outcome[\"2\"])", "-> bool: if isinstance(other, RatioStats): if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta: if abs(self.ratio_perc_med_max", "= round(((self.ratio_min / self.ratio_med) * 100), 2) self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) *", "= self.ratios_sorted[2] outcomes_by_rank = {rank: [] for rank in RANKS} ranks_by_outcome = {outcome:", "round(((self.ratio_min / self.ratio_max) * 100), 2) self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2)", "round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean", "= round(((self.ratio_1 / self.ratio_2) * 100), 2) self.ratio_perc_min_med = round(((self.ratio_min / self.ratio_med) *", "/ self.ratio_max) * 100), 2) self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2)", "\"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\", \"ratio_perc_min_med\", \"ratio_perc_med_max\", \"ratio_perc_min_max\", \"ratio_mean\", \"ratio_geometric_mean\", \"ratio_perc_mean_geometric_mean\"]", "** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2)", "= round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2) def is_having_similar_ratios_to(self, other: \"RatioStats\", delta=0.05) ->", "if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1 = \"/\".join(ranks_by_outcome[\"1\"]) self.rank_X = \"/\".join(ranks_by_outcome[\"X\"]) self.rank_2", 
"self.ratio_med = self.ratios_sorted[1] self.ratio_max = self.ratios_sorted[2] outcomes_by_rank = {rank: [] for rank in", "in OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank) self.rank_1", "round(float(ratio_X), 2) self.ratio_2 = round(float(ratio_2), 2) self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2) self.ratios_sorted =", "outcome in OUTCOMES: for rank in RANKS: if self[f\"ratio_{outcome}\"] == self[f\"ratio_{rank}\"]: outcomes_by_rank[rank].append(outcome) ranks_by_outcome[outcome].append(rank)", "\"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\", \"ratio_med\", \"ratio_max\", \"outcome_min\", \"outcome_med\", \"outcome_max\", \"ratio_perc_1_X\", \"ratio_perc_X_2\", \"ratio_perc_1_2\",", "= [\"date\", \"country\", \"tournament\", \"host_team\", \"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\", \"rank_X\", \"rank_2\", \"ratio_min\",", "self.outcome_med = \"/\".join(outcomes_by_rank[\"med\"]) self.outcome_max = \"/\".join(outcomes_by_rank[\"max\"]) self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100),", "= round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2) self.ratio_geometric_mean = round(((self.ratio_1 *", "= round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2) self.ratio_perc_mean_geometric_mean =", "2) self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2) def is_having_similar_ratios_to(self, other: \"RatioStats\",", "- other.ratio_2) <= delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05)", "2) self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2) self.ratio_perc_min_med = round(((self.ratio_min /", "other.ratio_2) <= delta: return True return False def is_having_similar_outcome_ratio_percentages_to(self, other: \"RatioStats\", delta=0.05) ->", "= tournament self.ratio_1 = round(float(ratio_1), 2) self.ratio_X = round(float(ratio_X), 2) self.ratio_2 = round(float(ratio_2),", "class RatioStats(AbstractStats): KEYS = [\"date\", \"country\", \"tournament\", \"host_team\", \"guest_team\", \"ratio_1\", \"ratio_X\", \"ratio_2\", \"rank_1\"," ]
#!flask/bin/python
# This file is for starting up the server!
from app import myapp

myapp.run(debug=True)
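The import above resolves only if the app package creates the Flask application object under the name myapp. A minimal sketch of that assumed layout (app/__init__.py is not part of this dump; the name myapp is taken from the import):

# app/__init__.py -- assumed companion module; creates the instance imported by the startup script
from flask import Flask

myapp = Flask(__name__)

With that in place, running the startup script with the interpreter named in the shebang launches Flask's development server with the interactive debugger and auto-reload enabled (debug=True).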
# encoding:utf-8
from nlpsc.dataset import Dataset
from nlpsc.vboard.dataset import DatasetVBoard


class TestVBoard(object):

    def test_dataset_vboard(self):
        # from nlpsc.vboard.dataset import index
        from ..vboard import bottle
        bottle.TEMPLATE_PATH.append('../vboard/views/')
        dataset = Dataset(name='测试数据集')  # the name means "test dataset"
        dataset.add_header('F-no(int) F-text_a F-text_b L-label1(list) L-label2')
        DatasetVBoard(dataset).serve()
# Week of Code 35/Triple Recursion-Week Of Code-35 2.py
n, m, k = map(int, input().split())
a = [[0] * n for i in range(n)]
for i in range(n):
    for j in range(n):
        if i == 0 and j == 0:
            a[i][j] = m
        elif i == j:
            a[i][j] = a[i - 1][j - 1] + k
        elif i > j:
            a[i][j] = a[i - 1][j] - 1
        else:
            a[i][j] = a[i][j - 1] - 1
for i in range(n):
    for j in range(n):
        print(a[i][j], sep=" ", end=" ")
    print()
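As a quick check of the three branches (the diagonal adds k to the previous diagonal cell, cells below the diagonal copy the cell above minus one, cells above the diagonal copy the cell to the left minus one): for the input line 3 5 2, i.e. n=3, m=5, k=2, the program prints

5 4 3
4 7 6
3 6 9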
__author__ = "<NAME>"
__copyright__ = "Copyright, 2021, <NAME>"
__license__ = "3-Clause BSD License"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"

#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
#INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
#OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import cv2
import cameralib
import numpy as np
import open3d as o3d


# Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
def load_velo_scan(file):
    """Load and parse a velodyne binary file."""
    scan = np.fromfile(file, dtype=np.float32)
    return scan.reshape((-1, 4))


def readConfFile(fileName):
    """Reads a Kitti camera/velodyne configuration file. The name of the parameter and the data
    are separated by ':', i.e. 'T: 0.0 0.0 0.0'.

    Parameters
    ----------
    fileName : str
        Name of the file to be read.

    Returns
    -------
    dictionary
        a dictionary containing the configuration data.
    """
    conf_dict = dict()
    try:
        with open(fileName) as f:
            for line in f:
                data = line.split(":")
                conf_dict[data[0]] = data[1]
        return conf_dict
    except Exception as e:
        raise


def extractMatrix(input_str, matrix_shape=None):
    """Convert a str into a matrix/vector.

    Parameters
    ----------
    input_str : str
        String to be converted into numpy matrix.
    matrix_shape : tuple
        Tuple defining the output shape.

    Returns
    -------
    numpy.array
        Numpy array that has the shape matrix_shape
    """
    try:
        if matrix_shape is None:
            output = np.fromstring(input_str, dtype=float, sep=' ').tolist()
        else:
            output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape)
        return output
    except Exception as e:
        raise


#--------------
# Test program
#--------------
cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt')
lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt')
lidar_data = np.transpose(load_velo_scan(
    './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[
    :, :3])
image_data = np.array(cv2.imread(
    './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png'))

# Rotation and translation from velodyne to camera 0
RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3))
TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1))
# Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0)
Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0)

# Rotation and translation from camera 0 to camera 2
Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3))
Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1))
Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2)

# Projection matrix from camera 2 to rectified camera 2
Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4))
Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3))
Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3))
im_size_rcam2 = extractMatrix(cam_conf['S_rect_02'])
im_size_rcam2.reverse()

# Extract K-matrix from the projection matrix P = K[R | t]
Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose())
#print("Kcam 2: " + str(Kcam2))
#print("Kcam rectified 2: " + str(Kcam2rect))

# Transform lidar points to camera 0 coordinate frame
lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data)

# Transform lidar points from camera0 to camera 2 coordinate frame
lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0)

# Project lidar points into rectified camera 2
cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data)

# Write original lidar points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose())
o3d.io.write_point_cloud("3d_lidar.ply", pcd)

# Write lidar points in cam0 coordinate frame points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose())
o3d.io.write_point_cloud("3d_cam0.ply", pcd)

# Write lidar points in cam2 coordinate frame points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose())
o3d.io.write_point_cloud("3d_cam2.ply", pcd)

# Write "filtered" points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose())
pcd.colors = o3d.utility.Vector3dVector(RGB_lidar / 255)
o3d.io.write_point_cloud("3d_proj_cam2.ply", pcd)
Name of the parameter and the data is separated", "the projection matrix P = K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose())", "points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose()) pcd.colors = o3d.utility.Vector3dVector(RGB_lidar /", "= extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera", "matrix from camera 2 to rectified camera 2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4))", "= extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from the", "Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam rectified 2:", "dict() try: with open(fileName) as f: for line in f: data = line.split(\":\")", "to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1))", "\"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" #THIS", "Exception as e: raise #-------------- # Test program #-------------- # Read configuration files", "\"\"\" try: if matrix_shape is None: output = np.fromstring(input_str, dtype=float, sep=' ').tolist() else:", "pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in cam2 coordinate frame", "2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2 =", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points", "= \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\"", "file. Name of the parameter and the data is separated by ':', i.e", "Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[", "= K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \" +", "<NAME>\" __license__ = \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__", "3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix", "Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a str into a matrix/vector.", "#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "Name of the file to be read. 
Returns ------- dictionary a dictionary containing", "raise #-------------- # Test program #-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt')", "and parse a velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4))", "# Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse a velodyne binary file.\"\"\" scan", "as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a str into a matrix/vector. Parameters", "separated by ':', i.e 'T: 0.0 0.0 0.0'. Parameters ---------- fileName : str", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write original lidar points into ply-file", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; #OR", "= data[1] return conf_dict except Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert", "0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points from camera0", "2 cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write original", "# Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan(", "= extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3,", "1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0) # Rotation and", "cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified camera 2 cam2_lidar, uv, RGB_lidar,", "cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points from camera0 to camera 2 coordinate frame", "str(Kcam2rect)) # Transform lidar points to camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0,", "frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) #", "# Rotation and traslation from velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3,", "matrix P = K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2:", "array that has the shape matrix_shape \"\"\" try: if matrix_shape is None: output", "velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3,", "to be converted into numpy matrix. matrix_shape : tuple Tuple defining the output", "pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose()) o3d.io.write_point_cloud(\"3d_cam2.ply\", pcd) # Write \"filtered\" points into ply-file pcd =", "a dictionary containing the configuration data. 
\"\"\" conf_dict = dict() try: with open(fileName)", "sep=' ').reshape(matrix_shape) return output except Exception as e: raise #-------------- # Test program", "__license__ = \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ =", "TvelTocam0) # Rotation and traslation from camera 0 to camera 2 Rcam0Tocam2 =", "#OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #OR", "files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3])", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF", "#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "= \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" #THIS SOFTWARE IS PROVIDED BY", "4)) def readConfFile(fileName): \"\"\"Reads a Kitti camera/velodyne configuration file. Name of the parameter", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "as o3d # Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse a velodyne binary", "= dict() try: with open(fileName) as f: for line in f: data =", "extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from the projection matrix P = K[R |", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL", "pcd) # Write lidar points in cam0 coordinate frame points into ply-file pcd", "# Project lidar points into rectified camera 2 cam2_lidar, uv, RGB_lidar, depth_map =", "2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect =", "readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3]) image_data = np.array(cv2.imread(", "traslation from camera 0 to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2", "OF SUCH DAMAGE. import cv2 import cameralib import numpy as np import open3d", "except Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a str into a", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "matrix_shape=None): \"\"\"Convert a str into a matrix/vector. 
Parameters ---------- input_str : str String", "output except Exception as e: raise #-------------- # Test program #-------------- # Read", "lidar points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd) #", "RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 =", "__author__ = \"<NAME>\" __copyright__ = \"Copyright, 2021, <NAME>\" __license__ = \"3-Clause BSD License\"", "cam2 coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose()) o3d.io.write_point_cloud(\"3d_cam2.ply\",", "= cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified camera 2 cam2_lidar, uv,", "---------- input_str : str String to be converted into numpy matrix. matrix_shape :", "2: \" + str(Kcam2rect)) # Transform lidar points to camera 0 coordinate frame", "Project lidar points into rectified camera 2 cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2,", "into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar", "a velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4)) def readConfFile(fileName):", "frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified camera 2", "= extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0)", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import cv2 import cameralib import numpy", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "#IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import cv2", "(3, 1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera 2 to", "None: output = np.fromstring(input_str, dtype=float, sep=' ').tolist() else: output = np.fromstring(input_str, dtype=float, sep='", "IN CONTRACT, STRICT LIABILITY, #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "data[1] return conf_dict except Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a", "HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR", "that has the shape matrix_shape \"\"\" try: if matrix_shape is None: output =", "\"\"\"Reads a Kitti camera/velodyne configuration file. Name of the parameter and the data", "= transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0) # Rotation and traslation from camera", "TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0,", "---------- fileName : str Name of the file to be read. 
Returns -------", "AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #OR TORT", "Write \"filtered\" points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose()) pcd.colors =", "https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse a velodyne binary file.\"\"\" scan = np.fromfile(file,", "file to be read. Returns ------- dictionary a dictionary containing the configuration data.", "= extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0,", "\"Development\" #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; #OR BUSINESS INTERRUPTION)", "Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2,", "tuple Tuple defining the output shape. Returns ------- numpy.array Numpy array that has", "be read. Returns ------- dictionary a dictionary containing the configuration data. \"\"\" conf_dict", "str String to be converted into numpy matrix. matrix_shape : tuple Tuple defining", "').tolist() else: output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape) return output except Exception as", "projection matrix P = K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam", "o3d.io.write_point_cloud(\"3d_cam2.ply\", pcd) # Write \"filtered\" points into ply-file pcd = o3d.geometry.PointCloud() pcd.points =", "a Kitti camera/velodyne configuration file. Name of the parameter and the data is", "output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape) return output except Exception as e: raise", "to be read. Returns ------- dictionary a dictionary containing the configuration data. 
\"\"\"", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "from velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'],", "np import open3d as o3d # Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse", "# Write original lidar points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose())", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "= o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in cam2 coordinate frame points", "extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3))", "parse a velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4)) def", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "points to camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar", "coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose()) o3d.io.write_point_cloud(\"3d_cam2.ply\", pcd)", "PROFITS; #OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "conf_dict except Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a str into", "extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() #", "\"\"\" conf_dict = dict() try: with open(fileName) as f: for line in f:", "the parameter and the data is separated by ':', i.e 'T: 0.0 0.0", ": str Name of the file to be read. Returns ------- dictionary a", ":3], Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect))", "= o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in cam2", "POSSIBILITY OF SUCH DAMAGE. 
import cv2 import cameralib import numpy as np import", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", "\" + str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform lidar points", "return conf_dict except Exception as e: raise def extractMatrix(input_str, matrix_shape=None): \"\"\"Convert a str", "#print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform", "cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write original lidar points into ply-file pcd =", "P = K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \"", "License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" #THIS SOFTWARE IS", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING,", "').reshape(matrix_shape) return output except Exception as e: raise #-------------- # Test program #--------------", "BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" #THIS SOFTWARE", "2 coordinate frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified", "matrix. matrix_shape : tuple Tuple defining the output shape. Returns ------- numpy.array Numpy", "coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points from camera0 to", "else: output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape) return output except Exception as e:", "# Write \"filtered\" points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose()) pcd.colors", "0.0 0.0'. Parameters ---------- fileName : str Name of the file to be", "= readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3]) image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) #", "AND ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3]) image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from", ": str String to be converted into numpy matrix. matrix_shape : tuple Tuple", "extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0)", "camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2", "lidar points to camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform", "to camera 2 coordinate frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points", "BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "SUCH DAMAGE. 
import cv2 import cameralib import numpy as np import open3d as", "image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne to camera 0", "(3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2", "+ str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform lidar points to", "0 to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3,", "try: if matrix_shape is None: output = np.fromstring(input_str, dtype=float, sep=' ').tolist() else: output", "np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam rectified 2: \" +", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write", "np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne to camera 0 RvelTocam0 =", "# Rotation and traslation from camera 0 to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'],", "= np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape) return output except Exception as e: raise #--------------", "as f: for line in f: data = line.split(\":\") conf_dict[data[0]] = data[1] return", "PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "#-------------- # Test program #-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf", "Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from", "RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write original lidar points into", "o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in cam2 coordinate frame points into", "= \"Copyright, 2021, <NAME>\" __license__ = \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__", "of the parameter and the data is separated by ':', i.e 'T: 0.0", "(3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0) # Rotation", "extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from the projection", "Transform lidar points to camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) #", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "line.split(\":\") conf_dict[data[0]] = data[1] return conf_dict except Exception as e: raise def extractMatrix(input_str,", "except Exception as e: raise #-------------- # Test program #-------------- # Read configuration", "Rotation and traslation from camera 0 to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3,", "AND CONTRIBUTORS \"AS IS\" AND 
ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT", "def readConfFile(fileName): \"\"\"Reads a Kitti camera/velodyne configuration file. Name of the parameter and", "# Write lidar points in cam2 coordinate frame points into ply-file pcd =", "into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose()) pcd.colors = o3d.utility.Vector3dVector(RGB_lidar / 255)", "K[R | t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2))", "def load_velo_scan(file): \"\"\"Load and parse a velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32)", "= line.split(\":\") conf_dict[data[0]] = data[1] return conf_dict except Exception as e: raise def", "o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd) # Write lidar points in cam0 coordinate", "and traslation from velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0", "2 to rectified camera 2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'],", "AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE", "# Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0) # Rotation and traslation", "from camera0 to camera 2 coordinate frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project", "Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'],", "= extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from the projection matrix P = K[R", "ARE DISCLAIMED. 
#IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "STRICT LIABILITY, #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified camera 2 cam2_lidar,", "open(fileName) as f: for line in f: data = line.split(\":\") conf_dict[data[0]] = data[1]", "= np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3]) image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation", "extractMatrix(lidar_conf['T'], (3, 1)) # Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0) Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0) #", "# Test program #-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf =", "into rectified camera 2 cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data)", "camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points from", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "the shape matrix_shape \"\"\" try: if matrix_shape is None: output = np.fromstring(input_str, dtype=float,", "#print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform lidar points to camera 0", ":, :3]) image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne to", "cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write original lidar", "dictionary a dictionary containing the configuration data. \"\"\" conf_dict = dict() try: with", "Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse a velodyne binary file.\"\"\" scan =", "= o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd) # Write lidar points in cam0 coordinate frame points", "in cam2 coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose())", "is separated by ':', i.e 'T: 0.0 0.0 0.0'. Parameters ---------- fileName :", "Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera 2 to rectified camera", "LOSS OF USE, DATA, OR PROFITS; #OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera 2 to rectified camera 2 Pcam2", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT HOLDER", "to camera 0 coordinate frame lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points", "of the file to be read. Returns ------- dictionary a dictionary containing the", ": tuple Tuple defining the output shape. Returns ------- numpy.array Numpy array that", "DISCLAIMED. 
#IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR", "parameter and the data is separated by ':', i.e 'T: 0.0 0.0 0.0'.", "| t] Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "camera 2 cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data) # Write", "Tuple defining the output shape. Returns ------- numpy.array Numpy array that has the", "Write original lidar points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\",", "configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data = np.transpose(load_velo_scan( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :,", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN", "K-matrix from the projection matrix P = K[R | t] Kcam2rect = np.matmul(Pcam2[:3,", "OR PROFITS; #OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix from the projection matrix P =", "configuration file. Name of the parameter and the data is separated by ':',", "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #OR TORT (INCLUDING", "velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4)) def readConfFile(fileName): \"\"\"Reads", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #OR TORT (INCLUDING NEGLIGENCE OR", "containing the configuration data. \"\"\" conf_dict = dict() try: with open(fileName) as f:", "defining the output shape. Returns ------- numpy.array Numpy array that has the shape", "= np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne to camera 0 RvelTocam0", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS", "EXEMPLARY, #OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "LIABILITY, #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "i.e 'T: 0.0 0.0 0.0'. 
Parameters ---------- fileName : str Name of the", "o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in cam2 coordinate frame points into ply-file", "matrix_shape \"\"\" try: if matrix_shape is None: output = np.fromstring(input_str, dtype=float, sep=' ').tolist()", "lidar points in cam0 coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points", "Pcam2, im_size_rcam2, image_data) # Write original lidar points into ply-file pcd = o3d.geometry.PointCloud()", "Rcam2rect.transpose()) #print(\"Kcam 2: \" + str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) #", "program #-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data", "2: \" + str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform lidar", "f: for line in f: data = line.split(\":\") conf_dict[data[0]] = data[1] return conf_dict", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "pcd) # Write lidar points in cam2 coordinate frame points into ply-file pcd", "1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera 2 to rectified", "HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT", "coordinate frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) # Project lidar points into rectified camera", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "Test program #-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt')", "= \"<NAME>\" __copyright__ = \"Copyright, 2021, <NAME>\" __license__ = \"3-Clause BSD License\" __maintainer__", "pcd) # Write \"filtered\" points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose())", "o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd) # Write lidar points in cam0 coordinate frame points into ply-file", "be converted into numpy matrix. matrix_shape : tuple Tuple defining the output shape.", "rectified 2: \" + str(Kcam2rect)) # Transform lidar points to camera 0 coordinate", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO", "to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1))", "extractMatrix(cam_conf['T_02'], (3, 1)) Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2) # Projection matrix from camera 2", "ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd) # Write lidar points", "str(Kcam2)) #print(\"Kcam rectified 2: \" + str(Kcam2rect)) # Transform lidar points to camera", "e: raise #-------------- # Test program #-------------- # Read configuration files cam_conf =", "'T: 0.0 0.0 0.0'. Parameters ---------- fileName : str Name of the file", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "0.0'. 
Parameters ---------- fileName : str Name of the file to be read.", "camera 2 to rectified camera 2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 =", "#-------------- # Read configuration files cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt') lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt') lidar_data =", "\"\"\"Load and parse a velodyne binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1,", "open3d as o3d # Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and parse a velodyne", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS", "cam0 coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\",", "for line in f: data = line.split(\":\") conf_dict[data[0]] = data[1] return conf_dict except", "Extract K-matrix from the projection matrix P = K[R | t] Kcam2rect =", "CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED", "= cameralib.concatenateRt(RvelTocam0, TvelTocam0) # Rotation and traslation from camera 0 to camera 2", "read. Returns ------- dictionary a dictionary containing the configuration data. \"\"\" conf_dict =", "USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "as np import open3d as o3d # Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load and", "is None: output = np.fromstring(input_str, dtype=float, sep=' ').tolist() else: output = np.fromstring(input_str, dtype=float,", "IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED TO, THE", "Returns ------- numpy.array Numpy array that has the shape matrix_shape \"\"\" try: if", "2021, <NAME>\" __license__ = \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "A PARTICULAR PURPOSE ARE DISCLAIMED. #IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "':', i.e 'T: 0.0 0.0 0.0'. Parameters ---------- fileName : str Name of", "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED TO,", "USE, DATA, OR PROFITS; #OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "points from camera0 to camera 2 coordinate frame lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0) #", "matrix/vector. Parameters ---------- input_str : str String to be converted into numpy matrix.", "3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse() # Extract K-matrix", "as e: raise #-------------- # Test program #-------------- # Read configuration files cam_conf", "output = np.fromstring(input_str, dtype=float, sep=' ').tolist() else: output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape)", "the output shape. 
Returns ------- numpy.array Numpy array that has the shape matrix_shape", "binary file.\"\"\" scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4)) def readConfFile(fileName): \"\"\"Reads a", "------- numpy.array Numpy array that has the shape matrix_shape \"\"\" try: if matrix_shape", "WHETHER IN CONTRACT, STRICT LIABILITY, #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "Projection matrix from camera 2 to rectified camera 2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3,", "= extractMatrix(cam_conf['K_02'], (3, 3)) Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3)) im_size_rcam2 = extractMatrix(cam_conf['S_rect_02']) im_size_rcam2.reverse()", "camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1)) #", "\"Copyright, 2021, <NAME>\" __license__ = \"3-Clause BSD License\" __maintainer__ = \"<NAME>\" __email__ =", "with open(fileName) as f: for line in f: data = line.split(\":\") conf_dict[data[0]] =", "input_str : str String to be converted into numpy matrix. matrix_shape : tuple", "scan = np.fromfile(file, dtype=np.float32) return scan.reshape((-1, 4)) def readConfFile(fileName): \"\"\"Reads a Kitti camera/velodyne", "converted into numpy matrix. matrix_shape : tuple Tuple defining the output shape. Returns", "data. \"\"\" conf_dict = dict() try: with open(fileName) as f: for line in", "original lidar points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose()) o3d.io.write_point_cloud(\"3d_lidar.ply\", pcd)", "numpy as np import open3d as o3d # Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py def load_velo_scan(file): \"\"\"Load", "pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose()) o3d.io.write_point_cloud(\"3d_cam0.ply\", pcd) # Write lidar points in", "= cameralib.transform(Trans_velTocam0, lidar_data) # Transform lidar points from camera0 to camera 2 coordinate", "'./test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'],", "OR IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "DAMAGE. import cv2 import cameralib import numpy as np import open3d as o3d", "traslation from velodyne to camera 0 RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3)) TvelTocam0 =", "in cam0 coordinate frame points into ply-file pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose())", "in f: data = line.split(\":\") conf_dict[data[0]] = data[1] return conf_dict except Exception as", "SERVICES; LOSS OF USE, DATA, OR PROFITS; #OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "IMPLIED WARRANTIES, #INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND", "camera 0 to camera 2 Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3)) Tcam0Tocam2 = extractMatrix(cam_conf['T_02'],", "'./test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[ :, :3]) image_data = np.array(cv2.imread( './test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png')) # Rotation and traslation from velodyne", "to rectified camera 2 Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4)) Kcam2 = extractMatrix(cam_conf['K_02'], (3," ]
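# NOTE: the test program above relies on a local "cameralib" module that is not
# included in this dump. The sketch below is only an assumption of what its two
# simplest helpers, concatenateRt and transform, would have to do: build a 4x4
# homogeneous transform from R and t, and apply it to a 3xN point array. It is
# not the actual implementation.
import numpy as np

def concatenateRt(R, t):
    # Stack a 3x3 rotation and a 3x1 translation into a 4x4 homogeneous transform.
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = np.asarray(t).reshape(3)
    return T

def transform(T, points):
    # Apply the 4x4 transform to 3xN points and return the transformed 3xN points.
    points_h = np.vstack([points, np.ones((1, points.shape[1]))])
    return (T @ points_h)[:3, :]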
[ "words in the triviaqa dataset, including all documents and all train questions. \"\"\"", "import argparse from os.path import exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import", "print(\"Adding question voc...\") train = data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with", "\"\"\" Build vocab of all words in the triviaqa dataset, including all documents", "questions. \"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\",", "from os.path import exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\"", "all train questions. \"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int,", "argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args()", "= argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args =", "as f: for word, c in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\")", "TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all words in the", "corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for word, c in corpus_voc.items(): if", "exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\")", "documents and all train questions. 
\"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\",", "train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for word, c in corpus_voc.items():", "in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\") if __name__ == \"__main__\": main()", "if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question", "get_evidence_voc \"\"\" Build vocab of all words in the triviaqa dataset, including all", "print(\"Saving...\") with open(args.output, \"w\") as f: for word, c in corpus_voc.items(): if c", "question voc...\") train = data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output,", "= TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train() for", "parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if", "for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for word,", "args = parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence,", "argparse from os.path import exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc", "import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all words in", "os.path import exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build", "parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args", "\"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset()", "Build vocab of all words in the triviaqa dataset, including all documents and", "\"w\") as f: for word, c in corpus_voc.items(): if c >= args.min_count: f.write(word)", "raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train", "args.n_processes) print(\"Adding question voc...\") train = data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\")", "f: for word, c in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\") if", "= get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train() for q in train:", "from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all words in the triviaqa", "for word, c in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\") if __name__", "word, c in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\") if __name__ ==", "q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for word, c", "data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for", "default=1) 
parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output): raise ValueError() data", "\"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\",", "voc...\") train = data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\")", "import exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab", "in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f: for word, c in", "triviaqa dataset, including all documents and all train questions. \"\"\" def main(): parser", "corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train() for q in", "type=int, default=1) args = parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc", "train = data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as", "parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding", "= parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes)", "docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all words", "c in corpus_voc.items(): if c >= args.min_count: f.write(word) f.write(\"\\n\") if __name__ == \"__main__\":", "data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train()", "docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all words in the triviaqa dataset,", "with open(args.output, \"w\") as f: for word, c in corpus_voc.items(): if c >=", "\"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output): raise", "exists from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1)", "TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train() for q", "of all words in the triviaqa dataset, including all documents and all train", "open(args.output, \"w\") as f: for word, c in corpus_voc.items(): if c >= args.min_count:", "type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output): raise ValueError()", "train questions. 
\"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1)", "def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int,", "including all documents and all train questions. \"\"\" def main(): parser = argparse.ArgumentParser()", "all words in the triviaqa dataset, including all documents and all train questions.", "default=1) args = parser.parse_args() if exists(args.output): raise ValueError() data = TriviaQaOpenDataset() corpus_voc =", "parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output): raise ValueError() data =", "import get_evidence_voc \"\"\" Build vocab of all words in the triviaqa dataset, including", "all documents and all train questions. \"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\")", "in the triviaqa dataset, including all documents and all train questions. \"\"\" def", "get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train = data.get_train() for q in train: corpus_voc.update(q.question)", "and all train questions. \"\"\" def main(): parser = argparse.ArgumentParser() parser.add_argument(\"output\") parser.add_argument(\"-m\", \"--min_count\",", "dataset, including all documents and all train questions. \"\"\" def main(): parser =", "the triviaqa dataset, including all documents and all train questions. \"\"\" def main():", "vocab of all words in the triviaqa dataset, including all documents and all", "= data.get_train() for q in train: corpus_voc.update(q.question) print(\"Saving...\") with open(args.output, \"w\") as f:", "ValueError() data = TriviaQaOpenDataset() corpus_voc = get_evidence_voc(data.evidence, args.n_processes) print(\"Adding question voc...\") train =", "from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset from docqa.triviaqa.evidence_corpus import get_evidence_voc \"\"\" Build vocab of all", "parser.add_argument(\"-m\", \"--min_count\", type=int, default=1) parser.add_argument(\"-n\", \"--n_processes\", type=int, default=1) args = parser.parse_args() if exists(args.output):" ]
[ "DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String()", "8601 formatted \" \"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)\", value, state) return", "= validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name =", "FancyValidator, Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state):", "= validators.String() # URL code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id", "True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id", "= validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String())", "True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code = validators.String(min=7) allow_extra_fields =", "ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode", "name = validators.String() description = validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields", "validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name = validators.String()", "service_id = validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id =", "validators, FancyValidator, Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value,", "\" \"for UTZ with no offset (i.e. 
2010-01-01T01:01:01Z)\", value, state) return val class", "= validators.String() icon = validators.String() name = validators.String() description = validators.String() active =", "= validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields =", "validators.String() # UUID response = validators.Bool() device_id = validators.String() allow_extra_fields = True class", "validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth", "= True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash = validators.String()", "code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name =", "= validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True)", "android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields = True class", "validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package", "= validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema):", "ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth =", "= True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String() allow_extra_fields = True", "service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String() service_ids =", "= validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active =", "= validators.Bool() callback_url = validators.String() allow_extra_fields = True class ServiceSecurityPolicyValidator(Schema): allow_extra_fields = True", "= validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key =", "service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key", "allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields", "date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode =", "import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val = parse(value) except", "PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate()", "date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields", "= validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = 
validators.String()", "premium = validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active", "validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields = True", "validators.String() active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash", "parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val = parse(value) except ValueError:", "no offset (i.e. 2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id = validators.String()", "True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True", "class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code = validators.String(min=7) allow_extra_fields = True", "formencode import Schema, validators, FancyValidator, Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator):", "allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon =", "class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id =", "validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash", "ForEach() auth_request = validators.String() # UUID response = validators.Bool() device_id = validators.String() allow_extra_fields", "invalid, it must be ISO 8601 formatted \" \"for UTZ with no offset", "class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires =", "Invalid(\"Date/time format is invalid, it must be ISO 8601 formatted \" \"for UTZ", "date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields = True class", "be ISO 8601 formatted \" \"for UTZ with no offset (i.e. 
2010-01-01T01:01:01Z)\", value,", "format is invalid, it must be ISO 8601 formatted \" \"for UTZ with", "= ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint =", "validators.String() description = validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields = True", "2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool()", "= ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key =", "description = validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields = True class", "import Schema, validators, FancyValidator, Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod", "value, state) return val class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created", "not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String() allow_extra_fields", "= True class ServiceValidator(Schema): id = validators.String() icon = validators.String() name = validators.String()", "validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields = True", "validators.String() name = validators.String() description = validators.String() active = validators.Bool() callback_url = validators.String()", "ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val = parse(value) except ValueError: raise Invalid(\"Date/time", "allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code = validators.String(min=7)", "validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys", "True class ServiceValidator(Schema): id = validators.String() icon = validators.String() name = validators.String() description", "validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status", "ISO 8601 formatted \" \"for UTZ with no offset (i.e. 
2010-01-01T01:01:01Z)\", value, state)", "= validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None,", "@staticmethod def _to_python(value, state): try: val = parse(value) except ValueError: raise Invalid(\"Date/time format", "AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): id", "= ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields =", "= validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields =", "DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool()", "Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try:", "ServiceValidator(Schema): id = validators.String() icon = validators.String() name = validators.String() description = validators.String()", "DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status = validators.Int() type = validators.String()", "validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created", "ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields = True", "active = validators.Bool() callback_url = validators.String() allow_extra_fields = True class ServiceSecurityPolicyValidator(Schema): allow_extra_fields =", "allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash =", "(i.e. 
2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id = validators.String() active =", "service_user_hash = validators.String() api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): id =", "qrcode = validators.String() # URL code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema):", "class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium =", "response = validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request =", "name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields", "# UUID response = validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema):", "id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name", "return val class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created = ValidateISODate()", "validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code =", "= validators.String() api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): id = validators.String()", "validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins", "= validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach()", "= validators.String() allow_extra_fields = True class ServiceValidator(Schema): id = validators.String() icon = validators.String()", "id = validators.String() icon = validators.String() name = validators.String() description = validators.String() active", "= ValidateISODate() public_key = validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String()", "auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash", "validators.String() name = validators.String() status = validators.Int() type = validators.String() allow_extra_fields = True", "allow_extra_fields = True class ServiceValidator(Schema): id = validators.String() icon = validators.String() name =", "val = parse(value) except ValueError: raise Invalid(\"Date/time format is invalid, it must be", "ValueError: raise Invalid(\"Date/time format is invalid, it must be ISO 8601 formatted \"", "True class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium", "push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time", "allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String() allow_extra_fields =", "class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = 
validators.String() allow_extra_fields = True class ServiceValidator(Schema):", "public_key = validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL", "= validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code", "validators.String() status = validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request", "True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String() allow_extra_fields = True class", "= validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name =", "it must be ISO 8601 formatted \" \"for UTZ with no offset (i.e.", "= True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String()", "icon = validators.String() name = validators.String() description = validators.String() active = validators.Bool() callback_url", "ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key = validators.String()", "state) return val class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created =", "DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code = validators.String(min=7) allow_extra_fields = True class", "validators.String() # URL code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id =", "validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields = True class ServiceSecurityPolicyValidator(Schema): allow_extra_fields", "status = validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request =", "= validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema):", "validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool()", "dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val = parse(value)", "_to_python(value, state): try: val = parse(value) except ValueError: raise Invalid(\"Date/time format is invalid,", "validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True", "class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class", "except ValueError: raise Invalid(\"Date/time format is invalid, it must be ISO 8601 formatted", "org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True class", "from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val =", "try: val = parse(value) except ValueError: raise Invalid(\"Date/time format is invalid, it must", "= ValidateISODate() date_expires = ValidateISODate() public_key = 
validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema):", "= validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String()", "parse(value) except ValueError: raise Invalid(\"Date/time format is invalid, it must be ISO 8601", "Schema, validators, FancyValidator, Invalid, ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def", "validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String() service_ids", "id = validators.String() name = validators.String() status = validators.Int() type = validators.String() allow_extra_fields", "allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status =", "active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() allow_extra_fields", "validators.String() icon = validators.String() name = validators.String() description = validators.String() active = validators.Bool()", "= validators.String() name = validators.String() description = validators.String() active = validators.Bool() callback_url =", "validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id", "ForEach from dateutil.parser import parse class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val", "device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package =", "service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields", "validators.String() api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): id = validators.String() icon", "class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id =", "must be ISO 8601 formatted \" \"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)\",", "validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String()", "\"for UTZ with no offset (i.e. 
2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema):", "val class PublicKeyValidator(Schema): id = validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires", "= ForEach() auth_request = validators.String() # UUID response = validators.Bool() device_id = validators.String()", "= validators.String() active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String()", "AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema):", "service_pins = ForEach() auth_request = validators.String() # UUID response = validators.Bool() device_id =", "= validators.String() # UUID response = validators.Bool() device_id = validators.String() allow_extra_fields = True", "validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time = validators.String()", "raise Invalid(\"Date/time format is invalid, it must be ISO 8601 formatted \" \"for", "validators.String() allow_extra_fields = True class ServiceValidator(Schema): id = validators.String() icon = validators.String() name", "URL code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name", "class ValidateISODate(FancyValidator): @staticmethod def _to_python(value, state): try: val = parse(value) except ValueError: raise", "= validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String()", "class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() # UUID response = validators.Bool()", "auth_request = validators.String() # UUID response = validators.Bool() device_id = validators.String() allow_extra_fields =", "= validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields = True class ServiceSecurityPolicyValidator(Schema):", "validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request", "= True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() # UUID response", "allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() # UUID", "= validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String() api_time =", "user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins =", "type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created =", "allow_extra_fields = True class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys =", "offset (i.e. 
2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id = validators.String() active", "def _to_python(value, state): try: val = parse(value) except ValueError: raise Invalid(\"Date/time format is", "= True class DirectoryValidator(Schema): id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String())", "= validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String()", "AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String()", "= validators.String() status = validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema):", "formatted \" \"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)\", value, state) return val", "class ServiceValidator(Schema): id = validators.String() icon = validators.String() name = validators.String() description =", "service_icon = validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields = True class", "= True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status = validators.Int()", "True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status = validators.Int() type", "class DirectoryGetDeviceResponseValidator(Schema): id = validators.String() name = validators.String() status = validators.Int() type =", "auth = validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id", "ValidateISODate() public_key = validators.String() allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() #", "True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() # UUID response =", "UTZ with no offset (i.e. 2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id", "with no offset (i.e. 
2010-01-01T01:01:01Z)\", value, state) return val class PublicKeyValidator(Schema): id =", "from formencode import Schema, validators, FancyValidator, Invalid, ForEach from dateutil.parser import parse class", "= validators.String() description = validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields =", "= validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id =", "validators.String() service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String()", "= parse(value) except ValueError: raise Invalid(\"Date/time format is invalid, it must be ISO", "validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon", "validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash = validators.String()", "# URL code = validators.String(min=7) allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): id = validators.String()", "is invalid, it must be ISO 8601 formatted \" \"for UTZ with no", "validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() #", "active = validators.Bool() allow_extra_fields = True class AuthorizationResponseValidator(Schema): auth = validators.String() service_user_hash =", "id = validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key", "= validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema):", "UUID response = validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request", "= validators.String() name = validators.String() status = validators.Int() type = validators.String() allow_extra_fields =", "validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String()", "validators.String() allow_extra_fields = True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True)", "auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name", "= validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String()", "name = validators.String() status = validators.Int() type = validators.String() allow_extra_fields = True class", "= validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() allow_extra_fields =", "public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request =", "sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint", "= validators.String() allow_extra_fields = True class 
DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate()", "= True class AuthorizeValidator(Schema): auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields =", "= validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) allow_extra_fields = True class AuthorizeSSEValidator(Schema): service_user_hash =", "= True class DirectoryUserDeviceLinkResponseValidator(Schema): qrcode = validators.String() # URL code = validators.String(min=7) allow_extra_fields", "state): try: val = parse(value) except ValueError: raise Invalid(\"Date/time format is invalid, it", "ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String()", "api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): id = validators.String() icon =", "AuthorizationResponsePackageValidator(Schema): service_pins = ForEach() auth_request = validators.String() # UUID response = validators.Bool() device_id", "True class DirectoryGetSessionsValidator(Schema): auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id" ]
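As a rough usage sketch (not part of the original module), a formencode `Schema` converts and validates a whole dict in one call, and `ValidateISODate` returns a parsed `datetime`. The class and field names below come from the validators above; the sample values are invented, and the snippet assumes it runs in the same module (or after importing those classes) with a formencode version that still accepts the `_to_python` hook:

# Invented sample payload; field names match DirectoryGetDeviceResponseValidator above.
device = DirectoryGetDeviceResponseValidator().to_python({
    "id": "device-1",
    "name": "My Phone",
    "status": "1",   # validators.Int() coerces the string to the int 1
    "type": "IOS",
})
print(device["status"] + 1)  # 2

# dateutil.parser.parse turns the ISO 8601 string into a datetime object
when = ValidateISODate().to_python("2010-01-01T01:01:01Z")
print(when.year)  # 2010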
[ "numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 =", "np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with", "tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess:", "conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img = np.array([3,5,5,3]) out = sess.run(conv0,feed_dict={input:img})", "import numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0", "= tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img = np.array([3,5,5,3]) out = sess.run(conv0,feed_dict={input:img}) print(out.shape)", "dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img = np.array([3,5,5,3]) out =", "tensorflow as tf import numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1,", "= tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img =", "tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img = np.array([3,5,5,3])", "input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session()", "as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID')", "tf import numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32)", "= tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as", "import tensorflow as tf import numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter =", "filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img", "as tf import numpy as np input = tf.placeholder(dtype=tf.float32,shape=[5,5,3]) filter = tf.constant(value=1, shape=[3,3,3,5],", "shape=[3,3,3,5], dtype=tf.float32) conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID') with tf.Session() as sess: img = np.array([3,5,5,3]) out" ]
[ "cherries in the game. :type cherry_lst: list of Cherry :param block_size: Size of", "position for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake", "returns only if snake collision occured. \"\"\" while True: # capture events for", "of all snakes in the game. :type snake_lst: list of Snake :param cherry_lst:", "Initializes the game with configuration, defined in config_data. :param config_data: Dictionary, which contains", "refresh_rate) timer = pygame.time.get_ticks() while True: # initialize new game snake_lst, cherry_pos =", "# check which snake's key was pressed and add it to key stack", "in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down':", "height, Snake.block_size) # check if new cherry position is within any of the", "205)] # player 4 is purple # create snake instances init_snake_lst = []", "event.type == pygame.USEREVENT: # happens on each timer tick for snake in snake_list:", "block_size: Size of one block of snake or cherry in pixels. :type block_size:", "capture events for event in pygame.event.get(): if event.type == pygame.QUIT: # happens when", "\"\"\" while True: # capture events for event in pygame.event.get(): if event.type ==", "when user tries to close window sys.exit() # exit from game elif event.type", "import sys import pygame import random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import", "Dictionary, which contains configuration for the game, such as game window dimensions, number", "main_loop(snake_list, cherry_list): \"\"\" Main loop of the game. This function returns only if", "timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there", "# player 2 is blue (255, 255, 50), # player 3 is yellow", "snake for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new block", "for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake and", "redraw screen with updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__", "the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the eaten cherry cherry.set_new_random_position(snake_lst,", "block_size)) # draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0],", "redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated snake and cherry positions. :param", "config_file: configuration_data = json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK = 0,", "to check that cherry will not be placed onto a snake. :type snake_lst:", "occured. \"\"\" while True: # capture events for event in pygame.event.get(): if event.type", "if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries was eaten", "instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"])", "all snake instances present in the game. 
This is needed to check that", "check if any of the cherries was eaten by the current snake for", "0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] #", "[(0, 255, 0), # player 1 is green (0, 0, 255), # player", ":type block_size: int \"\"\" # clear screen screen.fill(BLACK) # draw snakes for snake", "if snake collision occured. \"\"\" while True: # capture events for event in", "check if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any", "True: # initialize new game snake_lst, cherry_pos = init_game(configuration_data) # main loop will", "happens on each timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) #", "snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE", "block_size: int \"\"\" # clear screen screen.fill(BLACK) # draw snakes for snake in", "new random position for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with", "keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left':", "import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst: List, containing", "color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst = [] for i", "== '__main__': pygame.init() # load configuration data with open('config.json', 'r') as config_file: configuration_data", "configuration for the game, such as game window dimensions, number of snakes, keyboard", "such as game window dimensions, number of snakes, keyboard keys, etc. :type config_data:", "init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated", "(0, 0, 255), # player 2 is blue (255, 255, 50), # player", "user tries to close window sys.exit() # exit from game elif event.type ==", "timer = pygame.time.get_ticks() while True: # initialize new game snake_lst, cherry_pos = init_game(configuration_data)", "onto a snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0, width,", "# draw snakes for snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color,", "in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on each timer tick", ":rtype: tuple of list \"\"\" # colors for snakes snake_colors = [(0, 255,", "for event in pygame.event.get(): if event.type == pygame.QUIT: # happens when user tries", "in pygame.event.get(): if event.type == pygame.QUIT: # happens when user tries to close", "# exit from game elif event.type == pygame.KEYDOWN: # happens on key pressed", "the game. 
:type cherry_lst: list of Cherry :param block_size: Size of one block", "block_pos[1], block_size, block_size)) # draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0,", "pygame.KEYDOWN: # happens on key pressed # check which snake's key was pressed", "game snake_lst, cherry_pos = init_game(configuration_data) # main loop will exit only if collision", "sys.exit() # exit from game elif event.type == pygame.KEYDOWN: # happens on key", "for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is collision", "0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list):", ":param config_data: Dictionary, which contains configuration for the game, such as game window", "# redraw screen with updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if", "i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def", "is green (0, 0, 255), # player 2 is blue (255, 255, 50),", "snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw", "that cherry will not be placed onto a snake. :type snake_lst: list of", "range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]),", "for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new block to", "snakes for snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1],", "snake_lst: List, containing all snake instances present in the game. This is needed", "= configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) # set", "in the game. :type snake_lst: list of Snake :param cherry_lst: List of all", "configuration_data = json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0,", "snake instances present in the game. This is needed to check that cherry", "draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size,", "from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new", ":type config_data: dict :return: Lists of initialized snakes and cherries. :rtype: tuple of", "# create cherry instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry =", "position. :param snake_lst: List, containing all snake instances present in the game. This", "will not be placed onto a snake. :type snake_lst: list of Snake \"\"\"", "cherry.position: # append new block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) #", "pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances", "present in the game. 
This is needed to check that cherry will not", "(cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main", "3 is yellow (205, 0, 205)] # player 4 is purple # create", "of Snake :param cherry_lst: List of all cherries in the game. :type cherry_lst:", "cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init() # load configuration", "== pygame.QUIT: # happens when user tries to close window sys.exit() # exit", "# set display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer =", "a snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size),", "List, containing all snake instances present in the game. This is needed to", "as config_file: configuration_data = json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK =", "key was pressed and add it to key stack for snake in snake_list:", "the game. :type snake_lst: list of Snake :param cherry_lst: List of all cherries", "BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size =", "snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for cherry in", "the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake and cherry", "tuple of list \"\"\" # colors for snakes snake_colors = [(0, 255, 0),", "2 is blue (255, 255, 50), # player 3 is yellow (205, 0,", "keys, etc. :type config_data: dict :return: Lists of initialized snakes and cherries. :rtype:", "cherry instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst,", "that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the eaten", "or cherry in pixels. :type block_size: int \"\"\" # clear screen screen.fill(BLACK) #", "player 3 is yellow (205, 0, 205)] # player 4 is purple #", "of initialized snakes and cherries. :rtype: tuple of list \"\"\" # colors for", "while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return", "= random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes", "snakes and set new one for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst:", "(205, 0, 205)] # player 4 is purple # create snake instances init_snake_lst", "cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size))", "needed to check that cherry will not be placed onto a snake. 
:type", "for the game, such as game window dimensions, number of snakes, keyboard keys,", "in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) #", "set new random position for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen", "initialize new game snake_lst, cherry_pos = init_game(configuration_data) # main loop will exit only", "for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) #", "set new one for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos =", "the game with configuration, defined in config_data. :param config_data: Dictionary, which contains configuration", "in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height,", "there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the", "\"\"\" Initializes the game with configuration, defined in config_data. :param config_data: Dictionary, which", ":type cherry_lst: list of Cherry :param block_size: Size of one block of snake", "all cherries in the game. :type cherry_lst: list of Cherry :param block_size: Size", "random position for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated", "pygame.time.get_ticks() while True: # initialize new game snake_lst, cherry_pos = init_game(configuration_data) # main", "cherry_list, block_size) if __name__ == '__main__': pygame.init() # load configuration data with open('config.json',", "snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)", "new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with configuration, defined in config_data. :param", "the game, such as game window dimensions, number of snakes, keyboard keys, etc.", "new one for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0,", "= [(0, 255, 0), # player 1 is green (0, 0, 255), #", "on each timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check", "snake_lst, cherry_pos = init_game(configuration_data) # main loop will exit only if collision occurs", "# player 3 is yellow (205, 0, 205)] # player 4 is purple", "snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the", "255, 0), # player 1 is green (0, 0, 255), # player 2", "= [] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up':", "width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos", "is yellow (205, 0, 205)] # player 4 is purple # create snake", "elif event.type == pygame.KEYDOWN: # happens on key pressed # check which snake's", "def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst: List, containing all snake", "cherry position. 
:param snake_lst: List, containing all snake instances present in the game.", "'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst", "return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated snake", "if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of", "import random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\"", "only if snake collision occured. \"\"\" while True: # capture events for event", "# capture events for event in pygame.event.get(): if event.type == pygame.QUIT: # happens", "is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries", "Snake.block_size) # check if new cherry position is within any of the snakes", "cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen", "= pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: #", "snake_lst: list of Snake :param cherry_lst: List of all cherries in the game.", "= init_game(configuration_data) # main loop will exit only if collision occurs main_loop(snake_lst, cherry_pos)", "and cherries. :rtype: tuple of list \"\"\" # colors for snakes snake_colors =", "init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated snake and cherry", "# append new block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set", "in snake_list: if event.key in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif", "load configuration data with open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size =", "dimensions, number of snakes, keyboard keys, etc. :type config_data: dict :return: Lists of", "if __name__ == '__main__': pygame.init() # load configuration data with open('config.json', 'r') as", "set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: # initialize new game", "= Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\"", "\"\"\" Sets new cherry position. :param snake_lst: List, containing all snake instances present", "snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for cherry in cherry_lst: pygame.draw.rect(screen,", "num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]):", "size) # redraw screen with updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size)", "and add it to key stack for snake in snake_list: if event.key in", "of snakes, keyboard keys, etc. 
:type config_data: dict :return: Lists of initialized snakes", "for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size),", "for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries", "list \"\"\" # colors for snakes snake_colors = [(0, 255, 0), # player", "pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst =", "Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with", "Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake)", "instances present in the game. This is needed to check that cherry will", "= configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"]", "= 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"]", "screen screen.fill(BLACK) # draw snakes for snake in snake_lst: for block_pos in snake.block_pos_lst:", "= [] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return", "game. This is needed to check that cherry will not be placed onto", ":param snake_lst: List, containing all snake instances present in the game. This is", "game elif event.type == pygame.KEYDOWN: # happens on key pressed # check which", "init_game(config_data): \"\"\" Initializes the game with configuration, defined in config_data. :param config_data: Dictionary,", "int \"\"\" # clear screen screen.fill(BLACK) # draw snakes for snake in snake_lst:", "snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0,", "pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of the game. 
This function returns", "new cherry position is within any of the snakes and set new one", "cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new block to snake", "height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with configuration, defined", "in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst,", "pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update() def", "0, 255), # player 2 is blue (255, 255, 50), # player 3", "cherry_lst: list of Cherry :param block_size: Size of one block of snake or", "snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size))", "draw snakes for snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0],", "with updated snake and cherry positions. :param snake_lst: List of all snakes in", "within any of the snakes and set new one for snk in snake_lst:", "pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for cherry in cherry_lst:", "positions. :param snake_lst: List of all snakes in the game. :type snake_lst: list", "event.type == pygame.KEYDOWN: # happens on key pressed # check which snake's key", "snake instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake", "to close window sys.exit() # exit from game elif event.type == pygame.KEYDOWN: #", "pygame.QUIT: # happens when user tries to close window sys.exit() # exit from", "if new cherry position is within any of the snakes and set new", "in the game. :type cherry_lst: list of Cherry :param block_size: Size of one", "0, 205)] # player 4 is purple # create snake instances init_snake_lst =", "screen.fill(BLACK) # draw snakes for snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen,", "0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\"", "config_data: Dictionary, which contains configuration for the game, such as game window dimensions,", "cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update", "snakes, keyboard keys, etc. 
:type config_data: dict :return: Lists of initialized snakes and", "on key pressed # check which snake's key was pressed and add it", "\"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new", "(255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list,", "# create snake instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys =", "pygame import random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst):", "in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new block to snake that", "eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake and cherry positions", "redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init() # load configuration data with", "cherries was eaten by the current snake for cherry in cherry_list: if snake.block_pos_lst[0]", "with configuration, defined in config_data. :param config_data: Dictionary, which contains configuration for the", "# draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1],", ":param block_size: Size of one block of snake or cherry in pixels. :type", "== pygame.USEREVENT: # happens on each timer tick for snake in snake_list: snake.get_dir_from_keystack()", "cherry.position[1], block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop", "the snakes and set new one for snk in snake_lst: while new_cherry_pos in", "game window dimensions, number of snakes, keyboard keys, etc. :type config_data: dict :return:", "snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on each timer tick for snake", "of snake or cherry in pixels. :type block_size: int \"\"\" # clear screen", "window dimensions, number of snakes, keyboard keys, etc. :type config_data: dict :return: Lists", "check which snake's key was pressed and add it to key stack for", "block_size = configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT,", "game with configuration, defined in config_data. :param config_data: Dictionary, which contains configuration for", "keyboard keys, etc. :type config_data: dict :return: Lists of initialized snakes and cherries.", "# set new random position for the eaten cherry cherry.set_new_random_position(snake_lst, size) # redraw", "random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with configuration,", "height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos =", "pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: # initialize", "it to key stack for snake in snake_list: if event.key in [val for", "display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of the game. 
This function", "import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position.", ":type snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height,", "list of Cherry :param block_size: Size of one block of snake or cherry", "add it to key stack for snake in snake_list: if event.key in [val", "cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the eaten cherry cherry.set_new_random_position(snake_lst, size)", "screen with updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ ==", "Sets new cherry position. :param snake_lst: List, containing all snake instances present in", "etc. :type config_data: dict :return: Lists of initialized snakes and cherries. :rtype: tuple", "(255, 255, 50), # player 3 is yellow (205, 0, 205)] # player", "pressed # check which snake's key was pressed and add it to key", "snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on each timer tick for", "any of the cherries was eaten by the current snake for cherry in", "snakes snake_colors = [(0, 255, 0), # player 1 is green (0, 0,", "start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) #", "happens on key pressed # check which snake's key was pressed and add", "snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is collision if", "the cherries was eaten by the current snake for cherry in cherry_list: if", ":return: Lists of initialized snakes and cherries. :rtype: tuple of list \"\"\" #", "snake_lst: List of all snakes in the game. :type snake_lst: list of Snake", "ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the eaten cherry", "True: # capture events for event in pygame.event.get(): if event.type == pygame.QUIT: #", "event in pygame.event.get(): if event.type == pygame.QUIT: # happens when user tries to", "create cherry instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size)", "for snake in snake_list: if event.key in [val for _, val in snake.move_keys.items()]:", "SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst: List,", "blue (255, 255, 50), # player 3 is yellow (205, 0, 205)] #", "game. :type cherry_lst: list of Cherry :param block_size: Size of one block of", "in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for cherry", "update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of the game. This", "cherry_lst, block_size): \"\"\" Redraws screen with updated snake and cherry positions. 
:param snake_lst:", "random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new cherry position is", "# colors for snakes snake_colors = [(0, 255, 0), # player 1 is", "[] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]),", "cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new block to snake that ate", "snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry", "game, such as game window dimensions, number of snakes, keyboard keys, etc. :type", "config_data: dict :return: Lists of initialized snakes and cherries. :rtype: tuple of list", "Snake :param cherry_lst: List of all cherries in the game. :type cherry_lst: list", "= pygame.time.get_ticks() while True: # initialize new game snake_lst, cherry_pos = init_game(configuration_data) #", "# player 1 is green (0, 0, 255), # player 2 is blue", "sys import pygame import random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json", "'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry", "(block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255,", "Main loop of the game. This function returns only if snake collision occured.", "to key stack for snake in snake_list: if event.key in [val for _,", "= width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"]", "in cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display", "snake's key was pressed and add it to key stack for snake in", "# initialize new game snake_lst, cherry_pos = init_game(configuration_data) # main loop will exit", "snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)", "for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on", "== cherry.position: # append new block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1])", "in the game. This is needed to check that cherry will not be", "block of snake or cherry in pixels. 
:type block_size: int \"\"\" # clear", "SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries was eaten by the current", "0), # player 1 is green (0, 0, 255), # player 2 is", "Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new cherry position is within any", "\"\"\" # colors for snakes snake_colors = [(0, 255, 0), # player 1", "configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size", "= configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate)", "pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst = [] for", "List of all cherries in the game. :type cherry_lst: list of Cherry :param", "List of all snakes in the game. :type snake_lst: list of Snake :param", "return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries was eaten by the", "while True: # capture events for event in pygame.event.get(): if event.type == pygame.QUIT:", "green (0, 0, 255), # player 2 is blue (255, 255, 50), #", "50), # player 3 is yellow (205, 0, 205)] # player 4 is", "if any of the cherries was eaten by the current snake for cherry", "with open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size = width, height =", "init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry)", "screen with updated snake and cherry positions. :param snake_lst: List of all snakes", "# set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: # initialize new", "yellow (205, 0, 205)] # player 4 is purple # create snake instances", "containing all snake instances present in the game. This is needed to check", "snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"],", "# happens on key pressed # check which snake's key was pressed and", "cherry will not be placed onto a snake. :type snake_lst: list of Snake", "in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def", "happens when user tries to close window sys.exit() # exit from game elif", "was eaten by the current snake for cherry in cherry_list: if snake.block_pos_lst[0] ==", "data with open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size = width, height", "pixels. :type block_size: int \"\"\" # clear screen screen.fill(BLACK) # draw snakes for", "stack for snake in snake_list: if event.key in [val for _, val in", "config_data. :param config_data: Dictionary, which contains configuration for the game, such as game", "snake collision occured. 
\"\"\" while True: # capture events for event in pygame.event.get():", "collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries was", "random.randrange(0, height, Snake.block_size) # check if new cherry position is within any of", "while True: # initialize new game snake_lst, cherry_pos = init_game(configuration_data) # main loop", "Redraws screen with updated snake and cherry positions. :param snake_lst: List of all", "snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for the eaten cherry cherry.set_new_random_position(snake_lst, size) #", "for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right':", ":param cherry_lst: List of all cherries in the game. :type cherry_lst: list of", "new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\"", "open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size = width, height = configuration_data[\"main_window_size\"]", "player 4 is purple # create snake instances init_snake_lst = [] for i", "json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate", "pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: # initialize new game snake_lst, cherry_pos", "check that cherry will not be placed onto a snake. :type snake_lst: list", "in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: #", "the game. This function returns only if snake collision occured. \"\"\" while True:", "1 is green (0, 0, 255), # player 2 is blue (255, 255,", "close window sys.exit() # exit from game elif event.type == pygame.KEYDOWN: # happens", "eaten by the current snake for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position:", "def init_game(config_data): \"\"\" Initializes the game with configuration, defined in config_data. :param config_data:", "Size of one block of snake or cherry in pixels. :type block_size: int", "# player 4 is purple # create snake instances init_snake_lst = [] for", "snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if any of the cherries was eaten by", "initialized snakes and cherries. :rtype: tuple of list \"\"\" # colors for snakes", "player 2 is blue (255, 255, 50), # player 3 is yellow (205,", "of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check", "to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position for", "new cherry position. :param snake_lst: List, containing all snake instances present in the", "defined in config_data. :param config_data: Dictionary, which contains configuration for the game, such", "This function returns only if snake collision occured. 
\"\"\" while True: # capture", "0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display", "block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst = [] for i in", "for snakes snake_colors = [(0, 255, 0), # player 1 is green (0,", "# check if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check if", "This is needed to check that cherry will not be placed onto a", "snakes and cherries. :rtype: tuple of list \"\"\" # colors for snakes snake_colors", "# happens when user tries to close window sys.exit() # exit from game", "in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is collision if snake.collision:", "any of the snakes and set new one for snk in snake_lst: while", "snake in snake_list: if event.key in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key)", "be placed onto a snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos =", "snake or cherry in pixels. :type block_size: int \"\"\" # clear screen screen.fill(BLACK)", "collision occured. \"\"\" while True: # capture events for event in pygame.event.get(): if", "snake.block_pos_lst[0] == cherry.position: # append new block to snake that ate the cherry", "by the current snake for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: #", "snake and cherry positions. :param snake_lst: List of all snakes in the game.", "Lists of initialized snakes and cherries. :rtype: tuple of list \"\"\" # colors", "cherry_lst: pygame.draw.rect(screen, (255, 0, 0), (cherry.position[0], cherry.position[1], block_size, block_size)) # update display pygame.display.update()", "\"\"\" Redraws screen with updated snake and cherry positions. :param snake_lst: List of", "one for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width,", "pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create", "= config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])},", "new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos", "block_size, block_size)) # draw cherries for cherry in cherry_lst: pygame.draw.rect(screen, (255, 0, 0),", "'r') as config_file: configuration_data = json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK", "elif event.type == pygame.USEREVENT: # happens on each timer tick for snake in", "event.key in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT:", "all snakes in the game. 
:type snake_lst: list of Snake :param cherry_lst: List", "= random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new cherry position", "position is within any of the snakes and set new one for snk", "clear screen screen.fill(BLACK) # draw snakes for snake in snake_lst: for block_pos in", "if snake.block_pos_lst[0] == cherry.position: # append new block to snake that ate the", "__name__ == '__main__': pygame.init() # load configuration data with open('config.json', 'r') as config_file:", "255, 50), # player 3 is yellow (205, 0, 205)] # player 4", "== pygame.KEYDOWN: # happens on key pressed # check which snake's key was", "from game elif event.type == pygame.KEYDOWN: # happens on key pressed # check", "cherry position is within any of the snakes and set new one for", "snake_list: if event.key in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type", "current snake for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append new", "# happens on each timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list)", "cherries. :rtype: tuple of list \"\"\" # colors for snakes snake_colors = [(0,", "0, 0 refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set", "Cherry :param block_size: Size of one block of snake or cherry in pixels.", "Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst:", "screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True:", ":type snake_lst: list of Snake :param cherry_lst: List of all cherries in the", "list of Snake :param cherry_lst: List of all cherries in the game. :type", "# clear screen screen.fill(BLACK) # draw snakes for snake in snake_lst: for block_pos", "number of snakes, keyboard keys, etc. :type config_data: dict :return: Lists of initialized", "one block of snake or cherry in pixels. :type block_size: int \"\"\" #", "val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on each timer", "configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size)", "snake_list) # check if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE # check", "width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game", "placed onto a snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos = random.randrange(0,", "window sys.exit() # exit from game elif event.type == pygame.KEYDOWN: # happens on", "in pixels. 
:type block_size: int \"\"\" # clear screen screen.fill(BLACK) # draw snakes", "key stack for snake in snake_list: if event.key in [val for _, val", "updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init()", "move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) #", "255), # player 2 is blue (255, 255, 50), # player 3 is", "random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets", "'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"]) init_snake_lst.append(snake) # create cherry instances init_cherry_lst = []", "is within any of the snakes and set new one for snk in", "4 is purple # create snake instances init_snake_lst = [] for i in", "= Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i], block_size=config_data[\"block_size\"], num_of_start_blocks=config_data[\"initial_snake_length\"])", "contains configuration for the game, such as game window dimensions, number of snakes,", "width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new cherry position is within", "was pressed and add it to key stack for snake in snake_list: if", "Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws", "in config_data. :param config_data: Dictionary, which contains configuration for the game, such as", "snakes in the game. :type snake_lst: list of Snake :param cherry_lst: List of", "cherry_lst: List of all cherries in the game. :type cherry_lst: list of Cherry", "config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with", "events for event in pygame.event.get(): if event.type == pygame.QUIT: # happens when user", "which contains configuration for the game, such as game window dimensions, number of", "of the game. This function returns only if snake collision occured. \"\"\" while", "def main_loop(snake_list, cherry_list): \"\"\" Main loop of the game. 
This function returns only", "is purple # create snake instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]):", "tries to close window sys.exit() # exit from game elif event.type == pygame.KEYDOWN:", "config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]), 'down': pygame.__getattribute__(keys[2]), 'left': pygame.__getattribute__(keys[3])}, color=snake_colors[i],", "new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if new cherry", "positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init() # load configuration data", "not be placed onto a snake. :type snake_lst: list of Snake \"\"\" new_cherry_pos", "json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst: List, containing all", "\"\"\" Main loop of the game. This function returns only if snake collision", "refresh_rate = configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen", "new game snake_lst, cherry_pos = init_game(configuration_data) # main loop will exit only if", "block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of the", "cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake and cherry positions redraw_screen(snake_list, cherry_list,", "check if new cherry position is within any of the snakes and set", "with updated snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__':", "Snake, Cherry, SnakeGameStatusFlags import json def set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param", "and set new one for snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos", "cherry_list): \"\"\" Main loop of the game. This function returns only if snake", "snake_colors = [(0, 255, 0), # player 1 is green (0, 0, 255),", "the game. This is needed to check that cherry will not be placed", "timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while True: # initialize new game snake_lst,", "pressed and add it to key stack for snake in snake_list: if event.key", "[val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens", "range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst,", "each timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if", "= configuration_data[\"refresh_rate\"] start_pos = configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen =", "of the cherries was eaten by the current snake for cherry in cherry_list:", "snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is collision if snake.collision: return", "of all cherries in the game. 
:type cherry_lst: list of Cherry :param block_size:", "instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake =", "append new block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new", "display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks() while", "pygame.event.get(): if event.type == pygame.QUIT: # happens when user tries to close window", "and cherry positions. :param snake_lst: List of all snakes in the game. :type", "purple # create snake instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys", "is needed to check that cherry will not be placed onto a snake.", "list of Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) #", "block_size, block_size)) # update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of", "# update display pygame.display.update() def main_loop(snake_list, cherry_list): \"\"\" Main loop of the game.", "dict :return: Lists of initialized snakes and cherries. :rtype: tuple of list \"\"\"", "player 1 is green (0, 0, 255), # player 2 is blue (255,", "configuration_data[\"start_pos\"] block_size = configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) # set timer", "game. This function returns only if snake collision occured. \"\"\" while True: #", "key pressed # check which snake's key was pressed and add it to", "= json.load(config_file) size = width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0", "for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst", "cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size):", "of the snakes and set new one for snk in snake_lst: while new_cherry_pos", "and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init() # load", "# check if any of the cherries was eaten by the current snake", "colors for snakes snake_colors = [(0, 255, 0), # player 1 is green", "which snake's key was pressed and add it to key stack for snake", "cherry_pos = init_game(configuration_data) # main loop will exit only if collision occurs main_loop(snake_lst,", "block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random position", "cherry in pixels. :type block_size: int \"\"\" # clear screen screen.fill(BLACK) # draw", "set display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer = pygame.time.get_ticks()", "Snake \"\"\" new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) # check if", "\"\"\" # clear screen screen.fill(BLACK) # draw snakes for snake in snake_lst: for", "cherry positions. :param snake_lst: List of all snakes in the game. :type snake_lst:", "as game window dimensions, number of snakes, keyboard keys, etc. 
:type config_data: dict", "size = width, height = configuration_data[\"main_window_size\"] BLACK = 0, 0, 0 refresh_rate =", "'__main__': pygame.init() # load configuration data with open('config.json', 'r') as config_file: configuration_data =", "block_size) if __name__ == '__main__': pygame.init() # load configuration data with open('config.json', 'r')", "game. :type snake_lst: list of Snake :param cherry_lst: List of all cherries in", "function returns only if snake collision occured. \"\"\" while True: # capture events", "pygame.init() # load configuration data with open('config.json', 'r') as config_file: configuration_data = json.load(config_file)", "of list \"\"\" # colors for snakes snake_colors = [(0, 255, 0), #", "snk in snake_lst: while new_cherry_pos in snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0,", "set_new_cherry_pos(snake_lst): \"\"\" Sets new cherry position. :param snake_lst: List, containing all snake instances", "loop of the game. This function returns only if snake collision occured. \"\"\"", "snake.set_new_state(size, snake_list) # check if there is collision if snake.collision: return SnakeGameStatusFlags.COLLISION_OCCURENCE #", "init_snake_lst.append(snake) # create cherry instances init_cherry_lst = [] for i in range(config_data[\"num_cherries\"]): cherry", ":param snake_lst: List of all snakes in the game. :type snake_lst: list of", "event.type == pygame.QUIT: # happens when user tries to close window sys.exit() #", "random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the", "pygame.USEREVENT: # happens on each timer tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size,", "of Cherry :param block_size: Size of one block of snake or cherry in", "configuration data with open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size = width,", "for snake in snake_lst: for block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size,", "tick for snake in snake_list: snake.get_dir_from_keystack() snake.set_new_state(size, snake_list) # check if there is", "Snake.block_size) return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with configuration, defined in", "the current snake for cherry in cherry_list: if snake.block_pos_lst[0] == cherry.position: # append", "[] for i in range(config_data[\"num_cherries\"]): cherry = Cherry(block_size) cherry.set_new_random_position(init_snake_lst, config_data[\"main_window_size\"]) init_cherry_lst.append(cherry) return init_snake_lst,", "snake and cherry positions redraw_screen(snake_list, cherry_list, block_size) if __name__ == '__main__': pygame.init() #", "init_snake_lst, init_cherry_lst def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated snake and", "configuration_data[\"block_size\"] # set display screen = pygame.display.set_mode(size) # set timer pygame.time.set_timer(pygame.USEREVENT, refresh_rate) timer", "new block to snake that ate the cherry snake.block_pos_lst.append(snake.block_pos_lst[-1]) # set new random", "of one block of snake or cherry in pixels. 
:type block_size: int \"\"\"", "i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i], move_keys={'up': pygame.__getattribute__(keys[0]), 'right': pygame.__getattribute__(keys[1]),", "create snake instances init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i]", "block_size): \"\"\" Redraws screen with updated snake and cherry positions. :param snake_lst: List", "cherry cherry.set_new_random_position(snake_lst, size) # redraw screen with updated snake and cherry positions redraw_screen(snake_list,", "is blue (255, 255, 50), # player 3 is yellow (205, 0, 205)]", "if event.key in [val for _, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type ==", "# load configuration data with open('config.json', 'r') as config_file: configuration_data = json.load(config_file) size", "updated snake and cherry positions. :param snake_lst: List of all snakes in the", "if event.type == pygame.QUIT: # happens when user tries to close window sys.exit()", "configuration, defined in config_data. :param config_data: Dictionary, which contains configuration for the game,", "init_snake_lst = [] for i in range(config_data[\"num_snakes\"]): keys = config_data[\"keys\"][i] snake = Snake(start_pos=config_data[\"start_pos\"][i],", "import pygame import random from snake_utility import Snake, Cherry, SnakeGameStatusFlags import json def", "_, val in snake.move_keys.items()]: snake.key_stack.append(event.key) elif event.type == pygame.USEREVENT: # happens on each", "snk.block_pos_lst: new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size) return new_cherry_pos def init_game(config_data):", "# check if new cherry position is within any of the snakes and", "return new_cherry_pos def init_game(config_data): \"\"\" Initializes the game with configuration, defined in config_data.", "def redraw_screen(snake_lst, cherry_lst, block_size): \"\"\" Redraws screen with updated snake and cherry positions.", "block_pos in snake.block_pos_lst: pygame.draw.rect(screen, snake.color, (block_pos[0], block_pos[1], block_size, block_size)) # draw cherries for", "exit from game elif event.type == pygame.KEYDOWN: # happens on key pressed #" ]
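The fragments in the row above are drawn from a pygame-based multi-snake game (a main script built around Snake and Cherry helper classes, an init_game/redraw_screen/main_loop structure, and a random cherry-placement helper). As a reading aid, here is a minimal sketch of the cherry-placement rule those fragments describe: pick a random grid-aligned cell and re-roll until it lies on no snake. The names block_pos_lst, block_size, width and height are taken from the fragments; the Snake stub and the concrete values are assumptions made only to keep the sketch self-contained, and the fragments' per-snake re-roll loop is collapsed here into a single occupancy check.

# Sketch of the cherry-placement rule described by the fragments above.
# The Snake stub and the 20-pixel grid step are assumptions for illustration.
import random


class Snake:
    block_size = 20  # grid step in pixels (assumed value)

    def __init__(self, block_pos_lst):
        self.block_pos_lst = block_pos_lst  # occupied (x, y) cells of this snake


def set_new_cherry_pos(snake_lst, width, height):
    """Return a grid-aligned position that overlaps no snake block."""
    while True:
        pos = (random.randrange(0, width, Snake.block_size),
               random.randrange(0, height, Snake.block_size))
        # Accept the candidate only if no snake occupies that cell.
        if not any(pos in snk.block_pos_lst for snk in snake_lst):
            return pos


if __name__ == "__main__":
    snakes = [Snake([(0, 0), (20, 0), (40, 0)])]
    print(set_new_cherry_pos(snakes, width=400, height=300))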
[ "replace_text from .replace_text import append_unique_bytes_to_file from .replace_text import remove_comments_from_bytes from .replace_text import replace_text_in_file", "#from .replace_text import replace_text from .replace_text import append_unique_bytes_to_file from .replace_text import remove_comments_from_bytes from", "import replace_text from .replace_text import append_unique_bytes_to_file from .replace_text import remove_comments_from_bytes from .replace_text import", ".replace_text import replace_text from .replace_text import append_unique_bytes_to_file from .replace_text import remove_comments_from_bytes from .replace_text" ]
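The single-line row above comes from a package __init__.py that re-exports file-rewriting helpers from a replace_text submodule, with the bare replace_text import left commented out. A minimal sketch of that layout, assuming only the names visible in the fragments (append_unique_bytes_to_file, remove_comments_from_bytes, replace_text_in_file); the package directory and the __all__ list are not shown in the row and are added here purely for illustration.

# package/__init__.py -- sketch of the re-export pattern in the row above;
# the submodule is assumed to sit next to this file as replace_text.py.
# from .replace_text import replace_text  # left commented out, as in the fragments
from .replace_text import append_unique_bytes_to_file
from .replace_text import remove_comments_from_bytes
from .replace_text import replace_text_in_file

__all__ = [
    "append_unique_bytes_to_file",
    "remove_comments_from_bytes",
    "replace_text_in_file",
]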
[ "log scale regex_obs_to_group_list=[ (r'$\\pi$/K/p dN/dy',\"dN_dy_(pion|kaon|proton)\",'log'), (r'$\\pi$/K/p $\\langle p_T \\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$ dN/dy',\"dN_dy_(Lambda|Omega|Xi)\",'log'), (r'$v_n\\{2\\}$',\"v[2-5+]2\",'linear'), (r'$dN_{ch}/d\\eta$',\"dNch_deta\",'log'),", "No match means nothing to group if (match is not None): if (found_match):", "over observables to see which ones to group for system in system_strs: obs_to_group[system]={}", "group for obs, color in zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs] mid_centrality=[(low+up)/2. for low,up in cent] #Loop", "subplot only if (0 == n): plt.legend(line_list,[\"idf=\"+str(idf) for idf in idf_list],loc=\"upper right\",fontsize=10) plt.tight_layout(True)", "according to these regular expression # Also specify if they should be plotted", "for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()): plt.subplot(nb_of_rows,nb_of_cols,n+1) plt.xlabel(r'Centrality (%)', fontsize=10) plt.ylabel(obs_name,", "final_obs_grouping[system].setdefault(newvalue, []).append(key) ############## #### Plot #### ############## def plot(calcs): for system in system_strs:", "to group for system in system_strs: obs_to_group[system]={} for obs_name in obs_cent_list[system]: found_match=False for", "!= \"log\"): plt.ylim(ymin=0) # Plot legend in first subplot only if (0 ==", "figure out semi-automatically what observables to group together #### ################################################################################# # This is", "for obs, color in zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs] mid_centrality=[(low+up)/2. for low,up in cent] #Loop over", "n, (key, value) in enumerate(obs_to_group[system].items()): if (value is None): newvalue=(n,key) else: newvalue=value final_obs_grouping[system].setdefault(newvalue,", "right\",fontsize=10) plt.tight_layout(True) #plt.savefig(\"obs.pdf\") plt.show() if __name__ == '__main__': results = [] for file", "for file in glob.glob(sys.argv[1]): # Load calculations calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype)) entry =", "the plot to have nb_of_cols=4 # COunt how many rows needed nb_of_rows=int(np.ceil(nb_obs/nb_of_cols)) #", "print(\"Non-exclusive grouping. Can't work...\") exit(1) else: found_match=True obs_to_group[system][obs_name]=(regex_id, regex_label, plot_scale) if (not found_match):", "what observables to group together #### ################################################################################# # This is the input: #", "plt.legend(line_list,[\"idf=\"+str(idf) for idf in idf_list],loc=\"upper right\",fontsize=10) plt.tight_layout(True) #plt.savefig(\"obs.pdf\") plt.show() if __name__ == '__main__':", "to figure out semi-automatically what observables to group together #### ################################################################################# # This", "This parts figures out how to group observables based on the regular expressions", "stat_uncert=calcs[system][obs]['err'][:,idf][design_pt_to_plot] line_type,_,_ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color=color, markersize=4) line_list.append(line_type) if (plot_scale !=", "over observable group for obs, color in zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs] mid_centrality=[(low+up)/2. 
for low,up in", "markersize=4) line_list.append(line_type) if (plot_scale != \"log\"): plt.ylim(ymin=0) # Plot legend in first subplot", "is the input: # Specifies how observables are grouped according to these regular", "Decide how many columns we want the plot to have nb_of_cols=4 # COunt", "(key, value) in enumerate(obs_to_group[system].items()): if (value is None): newvalue=(n,key) else: newvalue=value final_obs_grouping[system].setdefault(newvalue, []).append(key)", "not None): if (found_match): print(\"Non-exclusive grouping. Can't work...\") exit(1) else: found_match=True obs_to_group[system][obs_name]=(regex_id, regex_label,", "enumerate(final_obs_grouping[system].items()): plt.subplot(nb_of_rows,nb_of_cols,n+1) plt.xlabel(r'Centrality (%)', fontsize=10) plt.ylabel(obs_name, fontsize=10) plt.yscale(plot_scale) # Loop over observable group", "zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs] mid_centrality=[(low+up)/2. for low,up in cent] #Loop over delta-f idf_list=[0,1,2,3] idf_sym=['D','o','^','.'] for", "# Specifies how observables are grouped according to these regular expression # Also", "on a linear or a log scale regex_obs_to_group_list=[ (r'$\\pi$/K/p dN/dy',\"dN_dy_(pion|kaon|proton)\",'log'), (r'$\\pi$/K/p $\\langle p_T", "#### ################################################################################# # This is the input: # Specifies how observables are grouped", "group observables based on the regular expressions obs_to_group={} # Loop over observables to", "fontsize=10) plt.yscale(plot_scale) # Loop over observable group for obs, color in zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs]", "{} # for system in system_strs: final_obs_grouping[system]={} for n, (key, value) in enumerate(obs_to_group[system].items()):", "Loop over observables to see which ones to group for system in system_strs:", "if they should be plotted on a linear or a log scale regex_obs_to_group_list=[", "in cent] #Loop over delta-f idf_list=[0,1,2,3] idf_sym=['D','o','^','.'] for idf, line in zip(idf_list, idf_sym):", "== '__main__': results = [] for file in glob.glob(sys.argv[1]): # Load calculations calcs", "file in glob.glob(sys.argv[1]): # Load calculations calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype)) entry = plot(calcs)", "as plt import sys, os, glob import re # Output data format from", "(obs, cent) in enumerate(obs_cent_list.items()): for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()): plt.subplot(nb_of_rows,nb_of_cols,n+1)", "Count how many observables to plot nb_obs=len(final_obs_grouping[system]) # Decide how many columns we", "sys, os, glob import re # Output data format from configurations import *", "regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list): r = re.compile(regex_obs_to_group) match=r.match(obs_name) # No match means nothing", "how many columns we want the plot to have nb_of_cols=4 # COunt how", "how to group observables based on the regular expressions obs_to_group={} # Loop over", "observables to group together #### ################################################################################# # This is the input: # Specifies", "#matplotlib.use('Agg') import matplotlib.pyplot as plt import sys, os, glob import re # Output", "specify if they should be plotted on a linear or a log scale", "plt import sys, os, glob import re # Output data format from configurations", "COunt how many rows needed nb_of_rows=int(np.ceil(nb_obs/nb_of_cols)) # Prepare figure fig = 
plt.figure(figsize=(2*nb_of_cols,2*nb_of_rows)) line_list=[]", "to these regular expression # Also specify if they should be plotted on", "found_match=True obs_to_group[system][obs_name]=(regex_id, regex_label, plot_scale) if (not found_match): obs_to_group[system][obs_name]=None # Parse the previous list", "means nothing to group if (match is not None): if (found_match): print(\"Non-exclusive grouping.", "to group if (match is not None): if (found_match): print(\"Non-exclusive grouping. Can't work...\")", "system_strs: final_obs_grouping[system]={} for n, (key, value) in enumerate(obs_to_group[system].items()): if (value is None): newvalue=(n,key)", "Output data format from configurations import * design_pt_to_plot=2 ################################################################################# #### Try to figure", "(r'$dE_T/d\\eta$',\"dET_deta\",'log'), (r'$\\langle p_T \\rangle$ fluct',\"pT_fluct\",'linear'), ] # This parts figures out how to", "dN/dy',\"dN_dy_(pion|kaon|proton)\",'log'), (r'$\\pi$/K/p $\\langle p_T \\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$ dN/dy',\"dN_dy_(Lambda|Omega|Xi)\",'log'), (r'$v_n\\{2\\}$',\"v[2-5+]2\",'linear'), (r'$dN_{ch}/d\\eta$',\"dNch_deta\",'log'), (r'$dE_T/d\\eta$',\"dET_deta\",'log'), (r'$\\langle p_T \\rangle$", "matplotlib.pyplot as plt import sys, os, glob import re # Output data format", "\\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$ dN/dy',\"dN_dy_(Lambda|Omega|Xi)\",'log'), (r'$v_n\\{2\\}$',\"v[2-5+]2\",'linear'), (r'$dN_{ch}/d\\eta$',\"dNch_deta\",'log'), (r'$dE_T/d\\eta$',\"dET_deta\",'log'), (r'$\\langle p_T \\rangle$ fluct',\"pT_fluct\",'linear'), ] # This", "system_strs: obs_to_group[system]={} for obs_name in obs_cent_list[system]: found_match=False for regex_id, (regex_label, regex_obs_to_group, plot_scale) in", "else: newvalue=value final_obs_grouping[system].setdefault(newvalue, []).append(key) ############## #### Plot #### ############## def plot(calcs): for system", "# This is the input: # Specifies how observables are grouped according to", "how observables are grouped according to these regular expression # Also specify if", "# No match means nothing to group if (match is not None): if", "# Loop over observable group for obs, color in zip(obs_list,'rgbrgbrgb'): cent=obs_cent_list[system][obs] mid_centrality=[(low+up)/2. 
for", "line_type,_,_ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color=color, markersize=4) line_list.append(line_type) if (plot_scale != \"log\"):", "in first subplot only if (0 == n): plt.legend(line_list,[\"idf=\"+str(idf) for idf in idf_list],loc=\"upper", "over grouped observables #for n, (obs, cent) in enumerate(obs_cent_list.items()): for n, ((regex_id, obs_name,", "(r'$\\pi$/K/p $\\langle p_T \\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$ dN/dy',\"dN_dy_(Lambda|Omega|Xi)\",'log'), (r'$v_n\\{2\\}$',\"v[2-5+]2\",'linear'), (r'$dN_{ch}/d\\eta$',\"dNch_deta\",'log'), (r'$dE_T/d\\eta$',\"dET_deta\",'log'), (r'$\\langle p_T \\rangle$ fluct',\"pT_fluct\",'linear'),", "(%)', fontsize=10) plt.ylabel(obs_name, fontsize=10) plt.yscale(plot_scale) # Loop over observable group for obs, color", "linear or a log scale regex_obs_to_group_list=[ (r'$\\pi$/K/p dN/dy',\"dN_dy_(pion|kaon|proton)\",'log'), (r'$\\pi$/K/p $\\langle p_T \\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$", "# Parse the previous list to make something useful out of it final_obs_grouping", "obs_to_group[system][obs_name]=None # Parse the previous list to make something useful out of it", "yerr=stat_uncert, fmt=line, color=color, markersize=4) line_list.append(line_type) if (plot_scale != \"log\"): plt.ylim(ymin=0) # Plot legend", "obs_to_group={} # Loop over observables to see which ones to group for system", "r = re.compile(regex_obs_to_group) match=r.match(obs_name) # No match means nothing to group if (match", "os, glob import re # Output data format from configurations import * design_pt_to_plot=2", "expressions obs_to_group={} # Loop over observables to see which ones to group for", "for idf in idf_list],loc=\"upper right\",fontsize=10) plt.tight_layout(True) #plt.savefig(\"obs.pdf\") plt.show() if __name__ == '__main__': results", "(value is None): newvalue=(n,key) else: newvalue=value final_obs_grouping[system].setdefault(newvalue, []).append(key) ############## #### Plot #### ##############", "obs_cent_list[system]: found_match=False for regex_id, (regex_label, regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list): r = re.compile(regex_obs_to_group) match=r.match(obs_name)", "value) in enumerate(obs_to_group[system].items()): if (value is None): newvalue=(n,key) else: newvalue=value final_obs_grouping[system].setdefault(newvalue, []).append(key) ##############", "observables to see which ones to group for system in system_strs: obs_to_group[system]={} for", "in system_strs: obs_to_group[system]={} for obs_name in obs_cent_list[system]: found_match=False for regex_id, (regex_label, regex_obs_to_group, plot_scale)", "############## def plot(calcs): for system in system_strs: # Count how many observables to", "plot to have nb_of_cols=4 # COunt how many rows needed nb_of_rows=int(np.ceil(nb_obs/nb_of_cols)) # Prepare", "in system_strs: # Count how many observables to plot nb_obs=len(final_obs_grouping[system]) # Decide how", "p_T \\rangle$',\"mean_pT_(pion|kaon|proton)\",'linear'), (r'$\\Lambda/\\Omega/\\Xi$ dN/dy',\"dN_dy_(Lambda|Omega|Xi)\",'log'), (r'$v_n\\{2\\}$',\"v[2-5+]2\",'linear'), (r'$dN_{ch}/d\\eta$',\"dNch_deta\",'log'), (r'$dE_T/d\\eta$',\"dET_deta\",'log'), (r'$\\langle p_T \\rangle$ fluct',\"pT_fluct\",'linear'), ] #", "is None): newvalue=(n,key) else: newvalue=value final_obs_grouping[system].setdefault(newvalue, []).append(key) ############## #### Plot #### ############## def", "#Loop over grouped 
#!/usr/bin/env python3
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, os, glob
import re

# Output data format
from configurations import *

design_pt_to_plot = 2

#################################################################################
#### Try to figure out semi-automatically what observables to group together ####
#################################################################################

# This is the input:
# Specifies how observables are grouped according to these regular expressions.
# Also specify if they should be plotted on a linear or a log scale.
regex_obs_to_group_list = [
    (r'$\pi$/K/p dN/dy', "dN_dy_(pion|kaon|proton)", 'log'),
    (r'$\pi$/K/p $\langle p_T \rangle$', "mean_pT_(pion|kaon|proton)", 'linear'),
    (r'$\Lambda/\Omega/\Xi$ dN/dy', "dN_dy_(Lambda|Omega|Xi)", 'log'),
    (r'$v_n\{2\}$', "v[2-5+]2", 'linear'),
    (r'$dN_{ch}/d\eta$', "dNch_deta", 'log'),
    (r'$dE_T/d\eta$', "dET_deta", 'log'),
    (r'$\langle p_T \rangle$ fluct', "pT_fluct", 'linear'),
]

# This part figures out how to group observables based on the regular expressions.

obs_to_group = {}
# Loop over observables to see which ones to group
for system in system_strs:
    obs_to_group[system] = {}
    for obs_name in obs_cent_list[system]:
        found_match = False
        for regex_id, (regex_label, regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list):
            r = re.compile(regex_obs_to_group)
            match = r.match(obs_name)
            # No match means nothing to group
            if match is not None:
                if found_match:
                    print("Non-exclusive grouping. Can't work...")
                    sys.exit(1)
                else:
                    found_match = True
                    obs_to_group[system][obs_name] = (regex_id, regex_label, plot_scale)
        if not found_match:
            obs_to_group[system][obs_name] = None

# Parse the previous list to make something useful out of it
final_obs_grouping = {}
for system in system_strs:
    final_obs_grouping[system] = {}
    for n, (key, value) in enumerate(obs_to_group[system].items()):
        if value is None:
            # Ungrouped observables get a panel of their own; default to a linear
            # scale so the (id, label, scale) unpacking in plot() still works.
            newvalue = (n, key, 'linear')
        else:
            newvalue = value
        final_obs_grouping[system].setdefault(newvalue, []).append(key)

##############
#### Plot ####
##############

def plot(calcs):
    for system in system_strs:
        # Count how many observables to plot
        nb_obs = len(final_obs_grouping[system])
        # Decide how many columns we want the plot to have
        nb_of_cols = 4
        # Count how many rows are needed
        nb_of_rows = int(np.ceil(nb_obs / nb_of_cols))
        # Prepare figure
        fig = plt.figure(figsize=(2 * nb_of_cols, 2 * nb_of_rows))
        line_list = []
        # Loop over grouped observables
        #for n, (obs, cent) in enumerate(obs_cent_list.items()):
        for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()):
            plt.subplot(nb_of_rows, nb_of_cols, n + 1)
            plt.xlabel(r'Centrality (%)', fontsize=10)
            plt.ylabel(obs_name, fontsize=10)
            plt.yscale(plot_scale)
            # Loop over the observables in this group
            for obs, color in zip(obs_list, 'rgbrgbrgb'):
                cent = obs_cent_list[system][obs]
                mid_centrality = [(low + up) / 2. for low, up in cent]
                # Loop over delta-f
                idf_list = [0, 1, 2, 3]
                idf_sym = ['D', 'o', '^', '.']
                for idf, line in zip(idf_list, idf_sym):
                    mean_values = calcs[system][obs]['mean'][:, idf][design_pt_to_plot]
                    stat_uncert = calcs[system][obs]['err'][:, idf][design_pt_to_plot]
                    line_type, _, _ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert,
                                                   fmt=line, color=color, markersize=4)
                    line_list.append(line_type)
            if plot_scale != "log":
                plt.ylim(bottom=0)
            # Plot legend in first subplot only
            if 0 == n:
                plt.legend(line_list, ["idf=" + str(idf) for idf in idf_list],
                           loc="upper right", fontsize=10)
        plt.tight_layout()
        #plt.savefig("obs.pdf")
        plt.show()

if __name__ == '__main__':
    results = []
    for file in glob.glob(sys.argv[1]):
        # Load calculations
        calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype))
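
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): plot() relies on
# `system_strs`, `obs_cent_list` and `bayes_dtype` imported from the project's
# `configurations` module, whose contents are not shown in this document.
# The mock below only illustrates the array shapes that the indexing in
# plot() assumes -- every name, observable and size here is an assumption
# made for illustration, not the project's real configuration.

import numpy as np

mock_system_strs = ['Pb-Pb-2760']                     # assumed system label
mock_obs_cent_list = {                                # (low, high) centrality bins per observable
    'Pb-Pb-2760': {
        'dNch_deta': [(0, 5), (5, 10), (10, 20)],
        'v22':       [(0, 5), (5, 10), (10, 20)],
    }
}
mock_n_idf = 4                                        # four delta-f corrections, as in idf_list

# One file record per design point; each observable stores an (n_idf, n_cent)
# block of means and statistical errors, so that
#   calcs[system][obs]['mean'][:, idf][design_pt]  ->  one value per centrality bin
mock_bayes_dtype = [
    (s,
     [(obs, [('mean', float, (mock_n_idf, len(cent))),
             ('err',  float, (mock_n_idf, len(cent)))])
      for obs, cent in mock_obs_cent_list[s].items()])
    for s in mock_system_strs
]

mock_calcs = np.zeros(3, dtype=np.dtype(mock_bayes_dtype))            # three fake design points
print(mock_calcs['Pb-Pb-2760']['dNch_deta']['mean'][:, 0][2].shape)   # -> (3,): one value per centrality bin

# With the real configurations in place, the driver above would presumably
# hand each loaded `calcs` array to plot(), e.g. `plot(calcs)` after loading
# it; that call is an assumption and is not shown in the source.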
[ "check numpy version versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected", "MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line to ~/.bashrc :\\n\") #print('", "user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\") input(\" Press Enter", "Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\")", "# be grateful if you could cite the book. # # # #", "Solids and Structures' # # <NAME>, <NAME>, <NAME> and <NAME> # # <NAME>", "Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version =", "' ) if int(version[0]) >= 5: print(\" OK\") else: print(\" Not OK\\n\\n Please", "'+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path", "# # The latest stable version can be downloaded from the web-site: #", "When using csh or tcsh add the following lines to\") print(\" ~/.cshrc or", "open source and intended for educational and scientific # # purposes only. If", "or 2.7.x\\n\") # check numpy version versionLong = numpy.__version__ version = versionLong.split('.') print(\"", "# # A github repository, with the most up to date version of", "version versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version detected %10s", "the following line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3", "export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add", "detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) == 0", "PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName = sys.platform # check python", "# get operating system osName = sys.platform # check python version versionLong =", "%10s : \" %(versionLong[0]) , end=' ' ) if int(version[0]) == 3 and", "\"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS", "# Disclaimer: # # The authors reserve all rights but do not guarantee", "note that PyFEM has been migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\")", "do not guarantee that the code is # # free from errors. Furthermore,", "= sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version detected %10s : \"", "code is # # free from errors. Furthermore, the authors shall not be", "to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem", "further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add", "print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash shell, add the", "book. 
# # # # Disclaimer: # # The authors reserve all rights", "# # # # The code is written by <NAME>, <NAME> and <NAME>.", "Finite Element Analysis of Solids and Structures' # # <NAME>, <NAME>, <NAME> and", "version = versionLong.split('.') print(\" Scipy version detected %10s : \" %(versionLong) , end='", "been migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n", "the following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\")", "The code is written by <NAME>, <NAME> and <NAME>. # # # #", "1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected", "scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected %10s : \" %(versionLong) ,", "===============================================================\\n\") #print(\" Add the following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\"", "that the code is # # free from errors. Furthermore, the authors shall", "print(\" OK\") elif int(version[0]) == 2: print(\" Please note that PyFEM has been", "Please install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\"", "following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\")", "<NAME> and <NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449 # #", "following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\"", "intended for educational and scientific # # purposes only. If you use PyFEM", "check python version versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version", "OK\\n\\n Please install PyQt 5.x or higher\\n\") # get current path path =", "# get current path path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX", "<NAME>, <NAME> and <NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449 #", "1.6.x or higher\\n\") # check scipy version versionLong = scipy.__version__ version = versionLong.split('.')", "' ) if int(version[0]) == 3 and int(version[1]) >= 6: print(\" OK\") elif", "print(\" Numpy version detected %10s : \" %(versionLong) , end=' ' ) if", "OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") # check numpy version versionLong =", "and int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Matplotlib", ") fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\"", "you use PyFEM in your research, the developers would # # be grateful", "# # The code is open source and intended for educational and scientific", "your research, the developers would # # be grateful if you could cite", "\"win\": batfile = open( 'pyfem.bat' , 'w' ) fexec = sys.executable if fexec[-5:]", "can be found here: # # https://github.com/jjcremmers/PyFEM # # # # The code", "Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") # check numpy version versionLong", "end=' ' ) if int(version[0]) == 3 and int(version[1]) >= 6: print(\" OK\")", ">= 6: print(\" OK\") else: print(\" Not OK\\n\\n Please install Numpy 1.6.x or", "versionLong.split('.') print(\" PyQt version detected %10s : \" %(versionLong) , end=' ' )", "versionLong[0].split('.') print(\" Python version detected %10s : \" %(versionLong[0]) , end=' ' )", "most up to date version of the 
code, # # can be found", "and intended for educational and scientific # # purposes only. If you use", ") if int(version[0]) == 0 and int(version[1]) >= 9: print(\" OK\") elif int(version[0])", "instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the", "# # # # 'Non-Linear Finite Element Analysis of Solids and Structures' #", "<NAME> and Sons, 2012, ISBN 978-0470666449 # # # # The code is", "# # # 'Non-Linear Finite Element Analysis of Solids and Structures' # #", ", end=' ' ) if int(version[0]) == 3 and int(version[1]) >= 6: print(\"", "event caused by the use of the program. # ############################################################################ import os,sys,numpy,scipy,matplotlib from", "= versionLong[0].split('.') print(\" Python version detected %10s : \" %(versionLong[0]) , end=' '", "or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\"", "pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual", "reserve all rights but do not guarantee that the code is # #", "Element Analysis of Solids and Structures' # # <NAME>, <NAME>, <NAME> and <NAME>", "# # purposes only. If you use PyFEM in your research, the developers", "# check python version versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\" Python", "print(\" OK\") else: print(\" Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\") #", "0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\")", "= versionLong.split('.') print(\" Scipy version detected %10s : \" %(versionLong) , end=' '", "written by <NAME>, <NAME> and <NAME>. 
# # # # The latest stable", "int(version[0]) == 0 and int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >= 1", "detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) == 1", ", end=' ' ) if int(version[0]) >= 5: print(\" OK\") else: print(\" Not", "versionLong.split('.') print(\" Numpy version detected %10s : \" %(versionLong) , end=' ' )", "and Structures' # # <NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and", "# # # Disclaimer: # # The authors reserve all rights but do", "from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName = sys.platform", "can be downloaded from the web-site: # # http://www.wiley.com/go/deborst # # # #", "OK\\n\\n Please install Numpy 1.6.x or higher\\n\") # check scipy version versionLong =", "978-0470666449 # # # # The code is written by <NAME>, <NAME> and", "elif osName[:3] == \"win\": batfile = open( 'pyfem.bat' , 'w' ) fexec =", "A github repository, with the most up to date version of the code,", "OK\") elif int(version[0]) == 2: print(\" Please note that PyFEM has been migrated", "osName[:3] == \"win\": batfile = open( 'pyfem.bat' , 'w' ) fexec = sys.executable", "all rights but do not guarantee that the code is # # free", "int(version[1]) >= 6: print(\" OK\") elif int(version[0]) == 2: print(\" Please note that", "or tcsh add the following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\"", "%10s : \" %(versionLong) , end=' ' ) if int(version[0]) == 0 and", "of the code, # # can be found here: # # https://github.com/jjcremmers/PyFEM #", "version = versionLong[0].split('.') print(\" Python version detected %10s : \" %(versionLong[0]) , end='", "versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected %10s : \"", "authors reserve all rights but do not guarantee that the code is #", "int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n", "research, the developers would # # be grateful if you could cite the", "the authors shall not be liable in any # # event caused by", "scientific # # purposes only. 
If you use PyFEM in your research, the", "to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install", "%(versionLong) , end=' ' ) if int(version[0]) >= 1 and int(version[1]) >= 0:", "user manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\"", "Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install Python", "2012, ISBN 978-0470666449 # # # # The code is written by <NAME>,", "the code, # # can be found here: # # https://github.com/jjcremmers/PyFEM # #", "\"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual for further", "detected %10s : \" %(versionLong[0]) , end=' ' ) if int(version[0]) == 3", "int(version[0]) == 3 and int(version[1]) >= 6: print(\" OK\") elif int(version[0]) == 2:", "print(\" Add the following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias", "~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\")", "print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user", "print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When", "print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install Python 2.6.x or", "Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\") # check scipy version versionLong", "int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x", "= fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\"", "to date version of the code, # # can be found here: #", "===============================================================\\n\") print(\" When using a bash shell, add the following line\") print(\" to", "print(\" PyQt version detected %10s : \" %(versionLong) , end=' ' ) if", "See the user manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS", "print(\" When using a bash shell, add the following line\") print(\" to ~/.bashrc", "Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") #", "0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\")", "end=' ' ) if int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\")", "\"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add the following lines to\") print(\"", "== 3 and int(version[1]) >= 6: print(\" OK\") elif int(version[0]) == 2: print(\"", "but do not guarantee that the code is # # free from errors.", ") if int(version[0]) == 3 and int(version[1]) >= 6: print(\" OK\") elif int(version[0])", "sys.platform # check python version versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\"", "== \"win\": batfile = open( 'pyfem.bat' , 'w' ) fexec = sys.executable if", "PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the user manual", "2.7.x\\n\") # check numpy version versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy", "check scipy 
version versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected", "print(\" See the user manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n", "Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version", "python version versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version detected", "is part of PyFEM, the code that accompanies the book: # # #", "~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh", ": \" %(versionLong) , end=' ' ) if int(version[0]) == 1 and int(version[1])", "= scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected %10s : \" %(versionLong)", "be downloaded from the web-site: # # http://www.wiley.com/go/deborst # # # # A", "here: # # https://github.com/jjcremmers/PyFEM # # # # The code is open source", "print(\" Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") # check numpy version", "%(versionLong[0]) , end=' ' ) if int(version[0]) == 3 and int(version[1]) >= 6:", "6: print(\" OK\") elif int(version[0]) == 2: print(\" Please note that PyFEM has", "current path path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\"", "PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the", "Sons, 2012, ISBN 978-0470666449 # # # # The code is written by", ">= 9: print(\" OK\") elif int(version[0]) >= 1 and int(version[1]) >= 0: print(\"", "print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path to PYTHONPATH and", "===============================================================\\n\") # get operating system osName = sys.platform # check python version versionLong", "= sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+'", "%(versionLong) , end=' ' ) if int(version[0]) == 0 and int(version[1]) >= 9:", "\" %(versionLong) , end=' ' ) if int(version[0]) >= 1 and int(version[1]) >=", "# The code is open source and intended for educational and scientific #", "print(\" OK\") else: print(\" Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong", "code, # # can be found here: # # https://github.com/jjcremmers/PyFEM # # #", "# # be grateful if you could cite the book. 
# # #", "print(\" Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:6]", "fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\")", "# <NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and Sons, 2012, ISBN", "latest stable version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst #", "print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line to ~/.bashrc :\\n\")", "\"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash shell, add", ":\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\"", "or higher\\n\") # check scipy version versionLong = scipy.__version__ version = versionLong.split('.') print(\"", "print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3", "detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) >= 5:", "https://github.com/jjcremmers/PyFEM # # # # The code is open source and intended for", "# # # # The latest stable version can be downloaded from the", "3.x\\n\") else: print(\" Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") # check", "Python file is part of PyFEM, the code that accompanies the book: #", "to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\"", "LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash shell, add the following", "== 2: print(\" Please note that PyFEM has been migrated to Python 3.x\\n\")", "or higher\\n\") # get current path path = os.getcwd() if osName[:5] == \"linux\":", "= matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected %10s : \" %(versionLong)", "higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected %10s :", "Matplotlib version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0])", "be grateful if you could cite the book. # # # # Disclaimer:", "only. 
If you use PyFEM in your research, the developers would # #", "alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual", "This Python file is part of PyFEM, the code that accompanies the book:", "detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) >= 1", "int(version[0]) == 2: print(\" Please note that PyFEM has been migrated to Python", "PyFEM, the code that accompanies the book: # # # # 'Non-Linear Finite", "alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add the following lines", "path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\"", "# check scipy version versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy version", "if int(version[0]) >= 5: print(\" OK\") else: print(\" Not OK\\n\\n Please install PyQt", "\".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add", "not be liable in any # # event caused by the use of", "repository, with the most up to date version of the code, # #", "else: print(\" Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__", "===============================================================\\n\") print(\" Installation successful!\") print(\" See the user manual for instructions.\\n\\n\") else: print(\"Operating", "Numpy 1.6.x or higher\\n\") # check scipy version versionLong = scipy.__version__ version =", "that PyFEM has been migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else:", "is written by <NAME>, <NAME> and <NAME>. # # # # The latest", "1 and int(version[1]) >= 6: print(\" OK\") else: print(\" Not OK\\n\\n Please install", "5: print(\" OK\") else: print(\" Not OK\\n\\n Please install PyQt 5.x or higher\\n\")", "cite the book. 
# # # # Disclaimer: # # The authors reserve", "print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version", "int(version[0]) == 1 and int(version[1]) >= 6: print(\" OK\") else: print(\" Not OK\\n\\n", "version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst # # #", "# 'Non-Linear Finite Element Analysis of Solids and Structures' # # <NAME>, <NAME>,", "Please note that PyFEM has been migrated to Python 3.x\\n\") print(\" Install Pyhon", "install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib", "OK\") else: print(\" Not OK\\n\\n Please install PyQt 5.x or higher\\n\") # get", "if int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\" Not", ">= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or", "install PyQt 5.x or higher\\n\") # get current path path = os.getcwd() if", "import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName", "else: print(\" Not OK\\n\\n Please install PyQt 5.x or higher\\n\") # get current", ") if int(version[0]) == 1 and int(version[1]) >= 6: print(\" OK\") else: print(\"", "The authors reserve all rights but do not guarantee that the code is", "int(version[0]) >= 5: print(\" OK\") else: print(\" Not OK\\n\\n Please install PyQt 5.x", "# event caused by the use of the program. # ############################################################################ import os,sys,numpy,scipy,matplotlib", "print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation", ": \" %(versionLong[0]) , end=' ' ) if int(version[0]) == 3 and int(version[1])", "print(\" Please note that PyFEM has been migrated to Python 3.x\\n\") print(\" Install", "PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected %10s : \" %(versionLong) ,", "if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a", "%(versionLong) , end=' ' ) if int(version[0]) == 1 and int(version[1]) >= 6:", "version versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected %10s :", "'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual for", "elif int(version[0]) == 2: print(\" Please note that PyFEM has been migrated to", "educational and scientific # # purposes only. If you use PyFEM in your", "print(\" Scipy version detected %10s : \" %(versionLong) , end=' ' ) if", "elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following", "the web-site: # # http://www.wiley.com/go/deborst # # # # A github repository, with", "print(\" When using csh or tcsh add the following lines to\") print(\" ~/.cshrc", "===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif", "# # # The latest stable version can be downloaded from the web-site:", "you could cite the book. 
# # # # Disclaimer: # # The", "If you use PyFEM in your research, the developers would # # be", "# # The authors reserve all rights but do not guarantee that the", "liable in any # # event caused by the use of the program.", ">= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Scipy 0.9.x or", "# # The code is written by <NAME>, <NAME> and <NAME>. # #", "\"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See", "int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >= 1 and int(version[1]) >= 0:", "manual for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open( 'pyfem.bat' ,", "batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following", "higher\\n\") # get current path path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n", "# # 'Non-Linear Finite Element Analysis of Solids and Structures' # # <NAME>,", "# can be found here: # # https://github.com/jjcremmers/PyFEM # # # # The", "matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected %10s : \" %(versionLong) ,", "# The code is written by <NAME>, <NAME> and <NAME>. # # #", "lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias", "alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user", "with the most up to date version of the code, # # can", "be liable in any # # event caused by the use of the", "<NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449 # # # #", "print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the", "# # # The code is open source and intended for educational and", "Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:3] ==", "of PyFEM, the code that accompanies the book: # # # # 'Non-Linear", "WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path to PYTHONPATH and PATH:\\n\")", "Furthermore, the authors shall not be liable in any # # event caused", "Disclaimer: # # The authors reserve all rights but do not guarantee that", "succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\":", "\"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line to ~/.bashrc", "When using a bash shell, add the following line\") print(\" to ~/.bashrc :\\n\")", "be found here: # # https://github.com/jjcremmers/PyFEM # # # # The code is", "# http://www.wiley.com/go/deborst # # # # A github repository, with the most up", "the code that accompanies the book: # # # # 'Non-Linear Finite Element", "Python version detected %10s : \" %(versionLong[0]) , end=' ' ) if int(version[0])", ": \" %(versionLong) , end=' ' ) if int(version[0]) >= 1 and int(version[1])", "\" %(versionLong) , end=' ' ) if int(version[0]) == 1 and int(version[1]) >=", "ISBN 978-0470666449 # # # # The code is written by <NAME>, 
<NAME>", "= versionLong.split('.') print(\" Matplotlib version detected %10s : \" %(versionLong) , end=' '", "print(\"\\n ===============================================================\\n\") # get operating system osName = sys.platform # check python version", "# # # # Disclaimer: # # The authors reserve all rights but", "PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the", "version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) ==", "the user manual for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open(", "# A github repository, with the most up to date version of the", "PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add the", "int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Scipy 0.9.x", "PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName = sys.platform #", "up to date version of the code, # # can be found here:", "print(\" Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:3]", "print(\" See the user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\")", "is open source and intended for educational and scientific # # purposes only.", "OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.')", "sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py", "caused by the use of the program. # ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt", "csh or tcsh add the following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\")", "guarantee that the code is # # free from errors. 
Furthermore, the authors", ", end=' ' ) if int(version[0]) == 0 and int(version[1]) >= 9: print(\"", "the user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\") input(\" Press", "# # <NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and Sons, 2012,", "') version = versionLong[0].split('.') print(\" Python version detected %10s : \" %(versionLong[0]) ,", "print(\" Not OK\\n\\n Please install PyQt 5.x or higher\\n\") # get current path", "#print(\" Add the following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\")", "path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\"", ">= 5: print(\" OK\") else: print(\" Not OK\\n\\n Please install PyQt 5.x or", "' ) if int(version[0]) == 0 and int(version[1]) >= 9: print(\" OK\") elif", "= PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected %10s : \" %(versionLong)", "pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add the following lines to\")", "sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version detected %10s : \" %(versionLong[0])", "pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual for", "use PyFEM in your research, the developers would # # be grateful if", "# # # # A github repository, with the most up to date", "osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line", "print(\" See the user manual for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile", "add the following line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias", "The latest stable version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst", "following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\"", "PyFEM has been migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\"", "the developers would # # be grateful if you could cite the book.", "<NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449", "import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName = sys.platform # check", ", 'w' ) fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5]", ", end=' ' ) if int(version[0]) >= 1 and int(version[1]) >= 0: print(\"", "print(\" Python version detected %10s : \" %(versionLong[0]) , end=' ' ) if", "numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected %10s : \" %(versionLong) ,", "and int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >= 1 and int(version[1]) >=", "version = versionLong.split('.') print(\" Matplotlib version detected %10s : \" %(versionLong) , end='", "stable version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst # #", "found here: # # https://github.com/jjcremmers/PyFEM # # # # The code is open", "for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open( 'pyfem.bat' , 'w'", "Please install Numpy 1.6.x or higher\\n\") # check scipy version versionLong = 
scipy.__version__", "install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt", "add the following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH", ">= 6: print(\" OK\") elif int(version[0]) == 2: print(\" Please note that PyFEM", "higher\\n\") # check scipy version versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy", "# purposes only. If you use PyFEM in your research, the developers would", "print(\" Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version", "in any # # event caused by the use of the program. #", "version of the code, # # can be found here: # # https://github.com/jjcremmers/PyFEM", "# # <NAME> and Sons, 2012, ISBN 978-0470666449 # # # # The", "# # # The code is written by <NAME>, <NAME> and <NAME>. #", "Add the following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\"", "install Python 2.6.x or 2.7.x\\n\") # check numpy version versionLong = numpy.__version__ version", "\" %(versionLong[0]) , end=' ' ) if int(version[0]) == 3 and int(version[1]) >=", "print(\" Installation successful!\") print(\" See the user manual for instructions.\\n\\n\") else: print(\"Operating system", "fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\")", "# check numpy version versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy version", "OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.')", "# This Python file is part of PyFEM, the code that accompanies the", "print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\")", "the program. 
# ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") #", "if int(version[0]) == 1 and int(version[1]) >= 6: print(\" OK\") else: print(\" Not", "# ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating", "6: print(\" OK\") else: print(\" Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\")", "%10s : \" %(versionLong) , end=' ' ) if int(version[0]) >= 1 and", "for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\") input(\" Press Enter to continue...\")", "Please install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\"", "# The latest stable version can be downloaded from the web-site: # #", "open( 'pyfem.bat' , 'w' ) fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec", "2: print(\" Please note that PyFEM has been migrated to Python 3.x\\n\") print(\"", "Installation succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:6] ==", ">= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please", "version detected %10s : \" %(versionLong[0]) , end=' ' ) if int(version[0]) ==", "for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\"", "############################################################################ # This Python file is part of PyFEM, the code that accompanies", "2.6.x or 2.7.x\\n\") # check numpy version versionLong = numpy.__version__ version = versionLong.split('.')", "shell, add the following line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\"", "' ) if int(version[0]) == 1 and int(version[1]) >= 6: print(\" OK\") else:", "errors. 
Furthermore, the authors shall not be liable in any # # event", ", end=' ' ) if int(version[0]) == 1 and int(version[1]) >= 6: print(\"", "Numpy version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0])", "OK\") else: print(\" Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong =", "= sys.platform # check python version versionLong = sys.version.split(' ') version = versionLong[0].split('.')", "~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\")", "# # https://github.com/jjcremmers/PyFEM # # # # The code is open source and", "versionLong.split('.') print(\" Matplotlib version detected %10s : \" %(versionLong) , end=' ' )", "' ) if int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else:", "print(\" OK\") elif int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else:", ": \" %(versionLong) , end=' ' ) if int(version[0]) == 0 and int(version[1])", "== \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line to", ") if int(version[0]) >= 5: print(\" OK\") else: print(\" Not OK\\n\\n Please install", "rights but do not guarantee that the code is # # free from", "<reponame>Konstantin8105/py4go ############################################################################ # This Python file is part of PyFEM, the code that", "for educational and scientific # # purposes only. If you use PyFEM in", "versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected %10s : \"", "print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See the", "the user manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\")", "= os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When", "successful!\") print(\" See the user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not", "versionLong.split('.') print(\" Scipy version detected %10s : \" %(versionLong) , end=' ' )", "the following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv PYTHONPATH \"+path)", "purposes only. 
If you use PyFEM in your research, the developers would #", "INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path to PYTHONPATH and PATH:\\n\") #print(\"", "int(version[1]) >= 6: print(\" OK\") else: print(\" Not OK\\n\\n Please install Numpy 1.6.x", "print(\" Matplotlib version detected %10s : \" %(versionLong) , end=' ' ) if", "elif int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\" Not", "or higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected %10s", "PyQt 5.x or higher\\n\") # get current path path = os.getcwd() if osName[:5]", "# # # # The code is open source and intended for educational", "to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using", "get current path path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\")", "downloaded from the web-site: # # http://www.wiley.com/go/deborst # # # # A github", "\" %(versionLong) , end=' ' ) if int(version[0]) >= 5: print(\" OK\") else:", "and <NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449 # # #", "<NAME>, <NAME> and <NAME>. # # # # The latest stable version can", "is # # free from errors. Furthermore, the authors shall not be liable", "# # can be found here: # # https://github.com/jjcremmers/PyFEM # # # #", "versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected %10s : \"", "INSTALLATION\") print(\" ===============================================================\\n\") print(\" Add the following line to ~/.bashrc :\\n\") #print(' export", "grateful if you could cite the book. # # # # Disclaimer: #", "print(\" ===============================================================\\n\") #print(\" Add the following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\")", "get operating system osName = sys.platform # check python version versionLong = sys.version.split('", "to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See", "#print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the user manual for", "line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\")", "%10s : \" %(versionLong) , end=' ' ) if int(version[0]) == 1 and", "\" %(versionLong) , end=' ' ) if int(version[0]) == 0 and int(version[1]) >=", "that accompanies the book: # # # # 'Non-Linear Finite Element Analysis of", "scipy version versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected %10s", "9: print(\" OK\") elif int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\")", "the following path to PYTHONPATH and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation", "authors shall not be liable in any # # event caused by the", "source and intended for educational and scientific # # purposes only. 
If you", "and int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install Scipy", "5.x or higher\\n\") # get current path path = os.getcwd() if osName[:5] ==", "else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong = PYQT_VERSION_STR", "any # # event caused by the use of the program. # ############################################################################", "part of PyFEM, the code that accompanies the book: # # # #", "book: # # # # 'Non-Linear Finite Element Analysis of Solids and Structures'", "migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please", "else: print(\" Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\") # check scipy", "manual for further instructions.\\n\\n\") elif osName[:6] == \"darwin\": print(\"\\n MAC-OS INSTALLATION\") print(\" ===============================================================\\n\")", "code is open source and intended for educational and scientific # # purposes", "Please install PyQt 5.x or higher\\n\") # get current path path = os.getcwd()", "and scientific # # purposes only. If you use PyFEM in your research,", "end=' ' ) if int(version[0]) >= 5: print(\" OK\") else: print(\" Not OK\\n\\n", "Structures' # # <NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and Sons,", "install Numpy 1.6.x or higher\\n\") # check scipy version versionLong = scipy.__version__ version", "PyQt version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0])", "and PATH:\\n\") #print(\" \",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the user", "See the user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\") input(\"", "batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path to PYTHONPATH", "version versionLong = scipy.__version__ version = versionLong.split('.') print(\" Scipy version detected %10s :", "===============================================================\\n\") print(\" Add the following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\"", "Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version", "Scipy version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0])", "~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation", "batfile = open( 'pyfem.bat' , 'w' ) fexec = sys.executable if fexec[-5:] ==", "in your research, the developers would # # be grateful if you could", "not guarantee that the code is # # free from errors. 
Furthermore, the", ": \" %(versionLong) , end=' ' ) if int(version[0]) >= 5: print(\" OK\")", "path path = os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\")", "3 and int(version[1]) >= 6: print(\" OK\") elif int(version[0]) == 2: print(\" Please", "has been migrated to Python 3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not", "0 and int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >= 1 and int(version[1])", "#print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\"", "manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\" not known.\") input(\" Press Enter to", "if you could cite the book. # # # # Disclaimer: # #", "export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\" See", "higher\\n\") versionLong = PYQT_VERSION_STR version = versionLong.split('.') print(\" PyQt version detected %10s :", "would # # be grateful if you could cite the book. # #", "a bash shell, add the following line\") print(\" to ~/.bashrc :\\n\") print(' export", "\",path,\"\\n\") print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the user manual for instructions.\\n\\n\")", "using csh or tcsh add the following lines to\") print(\" ~/.cshrc or ~/.tcshrc", "developers would # # be grateful if you could cite the book. #", "# # free from errors. 
Furthermore, the authors shall not be liable in", "OK\") else: print(\" Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\") # check", "Python 2.6.x or 2.7.x\\n\") # check numpy version versionLong = numpy.__version__ version =", "using a bash shell, add the following line\") print(\" to ~/.bashrc :\\n\") print('", "setenv PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\")", "== \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n", "PyFEM in your research, the developers would # # be grateful if you", "OK\") else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong =", "else: print(\" Not OK\\n\\n Please install Python 2.6.x or 2.7.x\\n\") # check numpy", "and Sons, 2012, ISBN 978-0470666449 # # # # The code is written", "accompanies the book: # # # # 'Non-Linear Finite Element Analysis of Solids", "OK\") elif int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\"", "succesful\") print(\" See the user manual for further instructions.\\n\\n\") elif osName[:3] == \"win\":", "if int(version[0]) == 3 and int(version[1]) >= 6: print(\" OK\") elif int(version[0]) ==", "1 and int(version[1]) >= 0: print(\" OK\") else: print(\" Not OK\\n\\n Please install", "date version of the code, # # can be found here: # #", "if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1')", "the book: # # # # 'Non-Linear Finite Element Analysis of Solids and", "== 0 and int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >= 1 and", "following line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\")", "the book. # # # # Disclaimer: # # The authors reserve all", "version detected %10s : \" %(versionLong) , end=' ' ) if int(version[0]) >=", "of the program. 
# ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\")", "PYTHONPATH \"+path) print(\" alias pyfem 'python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\") print(\"", "'pyfem.bat' , 'w' ) fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec =", "print(\" OK\") else: print(\" Not OK\\n\\n Please install Matplotlib 1.0.x or higher\\n\") versionLong", "= numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected %10s : \" %(versionLong)", "line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\"", "print(\" ===============================================================\\n\") print(\" Installation successful!\") print(\" See the user manual for instructions.\\n\\n\") else:", "shall not be liable in any # # event caused by the use", "and int(version[1]) >= 6: print(\" OK\") else: print(\" Not OK\\n\\n Please install Numpy", "print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh add the following", "= versionLong.split('.') print(\" PyQt version detected %10s : \" %(versionLong) , end=' '", "%10s : \" %(versionLong) , end=' ' ) if int(version[0]) >= 5: print(\"", "use of the program. # ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n", "%1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\" Add the following path to", "# # Disclaimer: # # The authors reserve all rights but do not", "code that accompanies the book: # # # # 'Non-Linear Finite Element Analysis", "'w' ) fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] +", "free from errors. Furthermore, the authors shall not be liable in any #", "# # event caused by the use of the program. # ############################################################################ import", "web-site: # # http://www.wiley.com/go/deborst # # # # A github repository, with the", "file is part of PyFEM, the code that accompanies the book: # #", "Not OK\\n\\n Please install Scipy 0.9.x or higher\\n\") versionLong = matplotlib.__version__ version =", "http://www.wiley.com/go/deborst # # # # A github repository, with the most up to", "Please install Python 2.6.x or 2.7.x\\n\") # check numpy version versionLong = numpy.__version__", "Installation successful!\") print(\" See the user manual for instructions.\\n\\n\") else: print(\"Operating system \",osName,\"", "versionLong = sys.version.split(' ') version = versionLong[0].split('.') print(\" Python version detected %10s :", "and int(version[1]) >= 6: print(\" OK\") elif int(version[0]) == 2: print(\" Please note", "by the use of the program. 
# ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import", "The code is open source and intended for educational and scientific # #", "# # http://www.wiley.com/go/deborst # # # # A github repository, with the most", "Add the following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3", "os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system osName =", "<NAME> and <NAME>. # # # # The latest stable version can be", "See the user manual for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile =", "fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close()", "user manual for further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open( 'pyfem.bat'", "Not OK\\n\\n Please install PyQt 5.x or higher\\n\") # get current path path", "os.getcwd() if osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using", "end=' ' ) if int(version[0]) == 0 and int(version[1]) >= 9: print(\" OK\")", "the use of the program. # ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR", "print(\" ===============================================================\\n\") print(\" Add the following line to ~/.bashrc :\\n\") #print(' export PYTHONPATH=\"'+path+'\"')", "the most up to date version of the code, # # can be", "= open( 'pyfem.bat' , 'w' ) fexec = sys.executable if fexec[-5:] == \"w.exe\":", "instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open( 'pyfem.bat' , 'w' ) fexec", "version = versionLong.split('.') print(\" PyQt version detected %10s : \" %(versionLong) , end='", "from the web-site: # # http://www.wiley.com/go/deborst # # # # A github repository,", "from errors. 
Furthermore, the authors shall not be liable in any # #", "# <NAME> and Sons, 2012, ISBN 978-0470666449 # # # # The code", ":\\n\") print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or", "fexec = sys.executable if fexec[-5:] == \"w.exe\": fexec = fexec[:-5] + \".exe\" print(fexec)", "# The authors reserve all rights but do not guarantee that the code", "== 1 and int(version[1]) >= 6: print(\" OK\") else: print(\" Not OK\\n\\n Please", "print(\" ===============================================================\\n\") print(\" When using a bash shell, add the following line\") print(\"", "= versionLong.split('.') print(\" Numpy version detected %10s : \" %(versionLong) , end=' '", ") if int(version[0]) >= 1 and int(version[1]) >= 0: print(\" OK\") else: print(\"", "== \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash shell,", "Analysis of Solids and Structures' # # <NAME>, <NAME>, <NAME> and <NAME> #", "INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash shell, add the following line\")", "system osName = sys.platform # check python version versionLong = sys.version.split(' ') version", "github repository, with the most up to date version of the code, #", "osName = sys.platform # check python version versionLong = sys.version.split(' ') version =", "%(versionLong) , end=' ' ) if int(version[0]) >= 5: print(\" OK\") else: print(\"", "further instructions.\\n\\n\") elif osName[:3] == \"win\": batfile = open( 'pyfem.bat' , 'w' )", "version = versionLong.split('.') print(\" Numpy version detected %10s : \" %(versionLong) , end='", "numpy version versionLong = numpy.__version__ version = versionLong.split('.') print(\" Numpy version detected %10s", "of Solids and Structures' # # <NAME>, <NAME>, <NAME> and <NAME> # #", "0.9.x or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected", "# free from errors. Furthermore, the authors shall not be liable in any", "# https://github.com/jjcremmers/PyFEM # # # # The code is open source and intended", ":\\n\") #print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" ===============================================================\\n\") print(\" Installation succesful\")", "code is written by <NAME>, <NAME> and <NAME>. # # # # The", "osName[:5] == \"linux\": print(\"\\n LINUX INSTALLATION\") print(\" ===============================================================\\n\") print(\" When using a bash", "print(\" OK\") else: print(\" Not OK\\n\\n Please install PyQt 5.x or higher\\n\") #", "print(\" Not OK\\n\\n Please install Numpy 1.6.x or higher\\n\") # check scipy version", "end=' ' ) if int(version[0]) == 1 and int(version[1]) >= 6: print(\" OK\")", "or higher\\n\") versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected %10s", "'Non-Linear Finite Element Analysis of Solids and Structures' # # <NAME>, <NAME>, <NAME>", "program. # ############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get", "could cite the book. 
# # # # Disclaimer: # # The authors", "operating system osName = sys.platform # check python version versionLong = sys.version.split(' ')", "3.x\\n\") print(\" Install Pyhon 3.x\\n\") else: print(\" Not OK\\n\\n Please install Python 2.6.x", "+ \".exe\" print(fexec) batfile.write(fexec+' '+path+'\\PyFEM.py %1') batfile.close() print(\"\\n WINDOWS INSTALLATION\") print(\" ===============================================================\\n\") #print(\"", "bash shell, add the following line\") print(\" to ~/.bashrc :\\n\") print(' export PYTHONPATH=\"'+path+'\"')", "and <NAME>. # # # # The latest stable version can be downloaded", "the code is # # free from errors. Furthermore, the authors shall not", "############################################################################ import os,sys,numpy,scipy,matplotlib from PyQt5.Qt import PYQT_VERSION_STR print(\"\\n ===============================================================\\n\") # get operating system", "print(' export PYTHONPATH=\"'+path+'\"') print(\" alias pyfem='python3 \"+path+\"/PyFEM.py'\\n\") print(\" When using csh or tcsh", "# # # A github repository, with the most up to date version", "tcsh add the following lines to\") print(\" ~/.cshrc or ~/.tcshrc :\\n\") print(\" setenv", "<NAME>. # # # # The latest stable version can be downloaded from", "versionLong = matplotlib.__version__ version = versionLong.split('.') print(\" Matplotlib version detected %10s : \"", "by <NAME>, <NAME> and <NAME>. # # # # The latest stable version", "if int(version[0]) == 0 and int(version[1]) >= 9: print(\" OK\") elif int(version[0]) >=" ]
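Hedged aside, not part of the original PyFEM installer: the per-library checks above compare int(version[0]) and int(version[1]) by hand, and clauses such as "int(version[1]) >= 0" are always true. The same "at least version X.Y" test can be written once as a tuple comparison; meets_minimum below is a hypothetical helper name used only for this sketch.

def meets_minimum(version_string, minimum):
  """Return True if a dotted version string is at least the `minimum` tuple, e.g. (1, 6)."""
  parts = []
  for token in version_string.split('.'):
    # keep only the digits so suffixes like "1.6.0rc1" still parse
    digits = ''.join(ch for ch in token if ch.isdigit())
    parts.append(int(digits) if digits else 0)
  return tuple(parts) >= tuple(minimum)

# Example: the Numpy and Scipy checks above, expressed with the helper.
import numpy, scipy

print("  Numpy  :", "OK" if meets_minimum(numpy.__version__, (1, 6)) else "too old")
print("  Scipy  :", "OK" if meets_minimum(scipy.__version__, (0, 9)) else "too old")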
[ "= data[0] device_data = data[1].split(\":\") if len(device_data) != 3: _LOGGER.warning( \"add_line_to_cache: Could not", "\"\"\" from __future__ import annotations import logging import os from typing import Final", ") return parameter = data[0] device_data = data[1].split(\":\") if len(device_data) != 3: _LOGGER.warning(", "device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if ( parameter in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END))", "VALUES paramset for which we don't create entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {", "Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\",", ").get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True return False def parameter_is_un_ignored( self, device_type: str, sub_type:", "\"\"\" Add line to from un ignore file to cache. Add data to", "-> bool: \"\"\"Return if a paramset is relevant.\"\"\" device_type_l = device_type.lower() sub_type_l =", "# paramset_key, parameter self._un_ignore_parameters_general: dict[str, set[str]] = { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), }", "self._init() def _init(self) -> None: \"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for (", "and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l =", "dict[str, list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\":", "[]) ): return True if ( accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter) ) is not None:", "\"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\", } # Ignore Parameter that", "EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, \"ACTIVITY_STATE\", \"DIRECTION\", } # Parameters within the", "(PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else: # add parameter self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line) except Exception: _LOGGER.warning( \"add_line_to_cache: Could", "in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } # device_type, channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str,", "= {\"LOWBAT\": 0} class ParameterVisibilityCache: \"\"\"Cache for parameter visibility.\"\"\" def __init__( self, central:", "\"IDENTIFICATION_MODE_LCD_BACKLIGHT\", \"INCLUSION_UNSUPPORTED_DEVICE\", \"INHIBIT\", \"INSTALL_MODE\", \"LEVEL_COMBINED\", \"LEVEL_REAL\", \"OLD_LEVEL\", \"PARTY_SET_POINT_TEMPERATURE\", \"PARTY_TIME_END\", \"PARTY_TIME_START\", \"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\",", "None, device_channel: int, paramset_key: str, parameter: str, ) -> bool: \"\"\"Return if parameter", "FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line in fptr.readlines(): self._add_line_to_cache(line) except", "within hahomematic \"\"\" from __future__ import annotations import logging import os from typing", "= set() if device_type_l not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {} for channel_no in", "which we 
create entities. _UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD", "device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if ( parameter in _IGNORED_PARAMETERS", "parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l: if", "to cache. Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file. \"\"\" try: line", "and sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower()) ): return True return False async def", "device_type_l.startswith(device_t): if parameter in un_ignore_parameters: return True return False def _add_line_to_cache(self, line: str)", "if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else: # add parameter self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line) except Exception:", "def parameter_is_un_ignored( self, device_type: str, sub_type: str | None, device_channel: int, paramset_key: str,", "FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES, ) from hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__) #", "import hahomematic.central_unit as hm_central from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH,", "HM-Sec-Key* \"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To override ignore for HmIP-PCBS }", "if sub_type else None if paramset_key == PARAMSET_KEY_VALUES: if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel,", "= self._un_ignore_parameters_by_device_lower[ sub_type_l ] if parameter in un_ignore_parameters: return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for", "set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", }", "dict[str, list[str]] = { device_type.lower(): parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } #", "\"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets", "device_data[0].lower() channel_no = int(device_data[1]) paramset_key = device_data[2] if device_type not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type]", "hahomematic \"\"\" from __future__ import annotations import logging import os from typing import", "expected.\", line, ) return parameter = data[0] device_data = data[1].split(\":\") if len(device_data) !=", "HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\", \"STATUS\"], # HM-Sec-Win* \"HM-Sec-Key\": [\"DIRECTION\", \"ERROR\"], # HM-Sec-Key*", "device_type: str, sub_type: str | None, device_channel: int, paramset_key: str, parameter: str, )", "the paramsets for which we create entities. 
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"DLD\":", "\"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\",", "None: \"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter, ) in", "if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True return", "# Parameters within the VALUES paramset for which we don't create entities. _IGNORED_PARAMETERS:", "for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters", "not None and device_channel is not None: un_ignore_parameters = ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {}", "TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type = device_data[0].lower() channel_no = int(device_data[1]) paramset_key = device_data[2]", "return device_type = device_data[0].lower() channel_no = int(device_data[1]) paramset_key = device_data[2] if device_type not", "line '%s' to un ignore cache.\", line ) def is_relevant_paramset( self, device_type: str,", "'%s' to un ignore cache.\", line ) def is_relevant_paramset( self, device_type: str, sub_type:", "== PARAMSET_KEY_MASTER: for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos", "Could not add line '%s' to un ignore cache. Only one @ expected.\",", "False def parameter_is_un_ignored( self, device_type: str, sub_type: str | None, device_channel: int, paramset_key:", "# Ignore Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\",", "} self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items()", "channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type ] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ] = {} if", "Final = self._central.central_config.storage_folder # paramset_key, parameter self._un_ignore_parameters_general: dict[str, set[str]] = { PARAMSET_KEY_MASTER: set(),", "None and paramset_key == PARAMSET_KEY_MASTER: for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if", "add parameter:paramset_key data = line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not", "un_ignore_parameters: dict[str, set[str]] = {} if device_type_l is not None and device_channel is", "add line '%s' to un ignore cache. 4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.\", line,", "if ( paramset_key not in self._un_ignore_parameters_by_device_paramset_key[ device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key] =", "arguments expected: e.g. 
TEMPERATURE:VALUES.\", line, ) return paramset_key = data[0] parameter = data[1]", "set() self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif \":\" in line: # add parameter:paramset_key data =", "device_type.lower() sub_type_l = sub_type.lower() if sub_type else None if parameter in self._un_ignore_parameters_general[paramset_key]: return", "in line: # add parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2: _LOGGER.warning(", "device_channel: int, ) -> bool: \"\"\"Return if a paramset is relevant.\"\"\" device_type_l =", "__init__( self, central: hm_central.CentralUnit, ): self._central: Final = central self._storage_folder: Final = self._central.central_config.storage_folder", "{} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True return False def parameter_is_un_ignored( self, device_type: str,", "_LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache. Only one", "d_type.lower() or (sub_type_l and sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower()) ): return True return", "2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache. 2", "= data[0] parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else: #", "\"WORKING\", } # Ignore Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\",", "return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t):", "PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] = { EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, \"ACTIVITY_STATE\",", "logging import os from typing import Final import hahomematic.central_unit as hm_central from hahomematic.const", "\"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {\"LOWBAT\": 0}", "dict[ str, dict[int, dict[str, set[str]]] ] = {} # device_type, channel_no self._relevant_master_paramsets_by_device: dict[str,", "entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"LOWBAT\": [ \"HM-LC-Sw1-FM\", \"HM-LC-Sw1PBU-FM\", \"HM-LC-Sw1-Pl-DN-R1\", \"HM-LC-Sw1-PCB\", \"HM-LC-Sw4-DR\",", "13}, PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] = {", "if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l ] if parameter", "data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file. \"\"\" try: line = line.strip() if", "\"\"\"Load custom un ignore parameters from disk.\"\"\" def _load() -> None: if not", "self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init() def _init(self) -> None: \"\"\"Init relevant_master_paramsets_by_device and", "@ expected.\", line, ) return parameter = data[0] device_data = data[1].split(\":\") if len(device_data)", "not add line '%s' to un ignore cache. Only one @ expected.\", line,", "3: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache. 
4", "] = {PARAMSET_KEY_MASTER: set()} self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ][PARAMSET_KEY_MASTER].add(parameter) def get_un_ignore_parameters( self, device_type: str, device_channel:", "with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = { \"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\",", "if parameter in self._un_ignore_parameters_general[paramset_key]: return True if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel,", "not in un_ignore_parameters: un_ignore_parameters[paramset_key] = set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self, device_type:", ") -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]] =", "paramset_key not in un_ignore_parameters: un_ignore_parameters[paramset_key] = set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self,", "str, device_channel: int, ) -> bool: \"\"\"Return if a paramset is relevant.\"\"\" device_type_l", "in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END)) or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith( tuple(self._ignore_parameters_by_device_lower.get(parameter, [])) ) or sub_type_l", "HM-Sec-Win* \"HM-Sec-Key\": [\"DIRECTION\", \"ERROR\"], # HM-Sec-Key* \"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To", "Exception as ex: _LOGGER.warning( \"load: Could not read unignore file %s\", ex.args, )", "device_type: str, sub_type: str | None, paramset_key: str, device_channel: int, ) -> bool:", "if ( accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter) ) is not None: if accept_channel != device_channel:", "# device_type, channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init() def _init(self) -> None:", "= data[1].split(\":\") if len(device_data) != 3: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s'", "\"@\" in line: # add parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2:", "set()): return True if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l", "which we don't create entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"LOWBAT\": [ \"HM-LC-Sw1-FM\",", "PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] = { EVENT_CONFIG_PENDING,", "in self._un_ignore_parameters_by_device_paramset_key[ device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key].add(parameter)", "device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key].add(parameter) if paramset_key", "def __init__( self, central: hm_central.CentralUnit, ): self._central: Final = central self._storage_folder: Final =", "load(self) -> None: \"\"\"Load custom un ignore parameters from disk.\"\"\" def _load() ->", "create entities. 
_IGNORED_PARAMETERS: set[str] = { \"AES_KEY\", \"BOOST_TIME\", \"BOOT\", \"BURST_LIMIT_WARNING\", \"CLEAR_WINDOW_OPEN_SYMBOL\", \"COMBINED_PARAMETER\", \"DATE_TIME_UNKNOWN\",", "): self._central: Final = central self._storage_folder: Final = self._central.central_config.storage_folder # paramset_key, parameter self._un_ignore_parameters_general:", "if parameter can be ignored.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type", "if device_channel in channel_nos and ( device_type_l == d_type.lower() or (sub_type_l and sub_type_l", "Final = central self._storage_folder: Final = self._central.central_config.storage_folder # paramset_key, parameter self._un_ignore_parameters_general: dict[str, set[str]]", "in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True return False def parameter_is_un_ignored(", "\"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\", \"TIME_OF_OPERATION\", \"WOCHENPROGRAMM\", } # Ignore Parameter that end with", "-> bool: \"\"\"Check if parameter can be ignored.\"\"\" device_type_l = device_type.lower() sub_type_l =", "if sub_type else None if paramset_key == PARAMSET_KEY_VALUES: return True if device_channel is", "parameter = data[0] device_data = data[1].split(\":\") if len(device_data) != 3: _LOGGER.warning( \"add_line_to_cache: Could", "if device_channel is not None and paramset_key == PARAMSET_KEY_MASTER: for ( d_type, channel_nos,", "in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l == d_type.lower() or (sub_type_l", "if ( parameter in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END)) or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith( tuple(self._ignore_parameters_by_device_lower.get(parameter, []))", "sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if ( parameter in _IGNORED_PARAMETERS or", "== PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return", "{} # device_type, channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init() def _init(self) ->", "un ignore cache. 
Only one @ expected.\", line, ) return parameter = data[0]", "if len(device_data) != 3: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un", "in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type ]", "\"add_line_to_cache: Could not add line '%s' to un ignore cache.\", line ) def", "No file found in %s\", self._storage_folder, ) return try: with open( file=os.path.join( self._storage_folder,", "if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter", "Ignore Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\",", "][PARAMSET_KEY_MASTER].add(parameter) def get_un_ignore_parameters( self, device_type: str, device_channel: int ) -> dict[str, set[str]]: \"\"\"Return", "is on un_ignore list\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type else", "\"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\", } # Ignore Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str]", "if paramset_key == PARAMSET_KEY_VALUES: if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return", "device_type not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[", ") or sub_type_l in self._ignore_parameters_by_device_lower.get(parameter, []) ): return True if ( accept_channel :=", "\":\" in line: # add parameter:paramset_key data = line.split(\":\") if len(data) != 2:", "== d_type.lower()) or device_type_l.startswith(d_type.lower()) ): return True return False async def load(self) ->", "_IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\",", "device_channel: int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str,", "[ \"HM-LC-Sw1-FM\", \"HM-LC-Sw1PBU-FM\", \"HM-LC-Sw1-Pl-DN-R1\", \"HM-LC-Sw1-PCB\", \"HM-LC-Sw4-DR\", \"HM-SwI-3-FM\", ], \"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [", "parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } # device_type, channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int,", "{} if ( paramset_key not in self._un_ignore_parameters_by_device_paramset_key[ device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key]", "sub_type_l = sub_type.lower() if sub_type else None if parameter in self._un_ignore_parameters_general[paramset_key]: return True", "( accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter) ) is not None: if accept_channel != device_channel: return", "\"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]] = {} if device_type_l is", "\"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", 
\"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets for", "][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key].add(parameter) if paramset_key == PARAMSET_KEY_MASTER: if device_type not", "e.g. TEMPERATURE:VALUES.\", line, ) return paramset_key = data[0] parameter = data[1] if paramset_key", ") def is_relevant_paramset( self, device_type: str, sub_type: str | None, paramset_key: str, device_channel:", "device_type: str, device_channel: int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower()", "channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l = device_type.lower() channel_nos, parameter = channels_parameter if device_type_l", "is not None and device_channel is not None: un_ignore_parameters = ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l,", "{} ).get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l,", "we create entities. _UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\":", "as ex: _LOGGER.warning( \"load: Could not read unignore file %s\", ex.args, ) await", "[ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To override ignore for HmIP-PCBS } # Parameters", "self._un_ignore_parameters_general.items(): if paramset_key not in un_ignore_parameters: un_ignore_parameters[paramset_key] = set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def", "not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug( \"load: No file found in %s\", self._storage_folder,", "about parameter visibility within hahomematic \"\"\" from __future__ import annotations import logging import", "{}).get(PARAMSET_KEY_MASTER, []): return True return False def parameter_is_un_ignored( self, device_type: str, sub_type: str", "\"PARTY_TIME_END\", \"PARTY_TIME_START\", \"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\",", "un ignore cache. 4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type =", "expected: e.g. TEMPERATURE:VALUES.\", line, ) return paramset_key = data[0] parameter = data[1] if", "} # Parameters within the paramsets for which we create entities. 
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str,", "\"PARTY_SET_POINT_TEMPERATURE\", \"PARTY_TIME_END\", \"PARTY_TIME_START\", \"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\",", "= {PARAMSET_KEY_MASTER: set()} self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ][PARAMSET_KEY_MASTER].add(parameter) def get_un_ignore_parameters( self, device_type: str, device_channel: int", "paramset_key not in self._un_ignore_parameters_by_device_paramset_key[ device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][", "None, paramset_key: str, device_channel: int, ) -> bool: \"\"\"Return if a paramset is", "list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] = {} # device_type, channel_no", "9, 13}, PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] =", "self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line in fptr.readlines(): self._add_line_to_cache(line)", "# HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\", \"STATUS\"], # HM-Sec-Win*", "if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l:", "{ \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\",", "} # Ignore Parameter that end with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = { \"OVERFLOW\", \"OVERHEAT\",", "line: # add parameter:paramset_key data = line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache:", "import logging import os from typing import Final import hahomematic.central_unit as hm_central from", "ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.\", line, ) return paramset_key = data[0]", "-> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]] = {}", "in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ): self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ] = {PARAMSET_KEY_MASTER: set()} self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no", "device_type_l.startswith(d_type.lower()) ): return True return False async def load(self) -> None: \"\"\"Load custom", "= { device_type.lower(): parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } # device_type, channel_no,", ") is not None: if accept_channel != device_channel: return True if paramset_key ==", "Parameters within the VALUES paramset for which we don't create entities. 
_IGNORED_PARAMETERS: set[str]", "channel_no ) elif \":\" in line: # add parameter:paramset_key data = line.split(\":\") if", "or (sub_type_l and sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower()) ): return True return False", "PARAMSET_KEY_VALUES: if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if (", "\"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\", \"TIME_OF_OPERATION\",", "channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ): self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ] = {PARAMSET_KEY_MASTER: set()}", "from file. \"\"\" try: line = line.strip() if \"@\" in line: # add", "PARAMSET_KEY_MASTER: if device_type not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type] = set() self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif", "mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line in fptr.readlines(): self._add_line_to_cache(line) except Exception as", "} _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {\"LOWBAT\": 0} class ParameterVisibilityCache: \"\"\"Cache for parameter visibility.\"\"\"", "\"REPORTING\", \"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\", } # Ignore Parameter that start with _IGNORED_PARAMETERS_WILDCARDS_START:", "| None, device_channel: int, paramset_key: str, parameter: str, ) -> bool: \"\"\"Check if", "FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug( \"load: No file found in %s\", self._storage_folder, ) return try:", "class ParameterVisibilityCache: \"\"\"Cache for parameter visibility.\"\"\" def __init__( self, central: hm_central.CentralUnit, ): self._central:", "def ignore_parameter( self, device_type: str, sub_type: str | None, device_channel: int, paramset_key: str,", "self._un_ignore_parameters_general[paramset_key].add(parameter) else: # add parameter self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line) except Exception: _LOGGER.warning( \"add_line_to_cache: Could not add", "if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ): self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ] =", "\"HM-LC-Sw1-PCB\", \"HM-LC-Sw4-DR\", \"HM-SwI-3-FM\", ], \"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\",", "parameter: str, ) -> bool: \"\"\"Check if parameter can be ignored.\"\"\" device_type_l =", "in self._un_ignore_parameters_general[paramset_key]: return True if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()):", "hm_central from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE,", "parameter in un_ignore_parameters: return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in", "device_channel in channel_nos and ( device_type_l == d_type.lower() or (sub_type_l and sub_type_l ==", "in 
self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {} for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no", "line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to", "EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES, ) from hahomematic.helpers import check_or_create_directory _LOGGER", "in channel_nos and ( device_type_l == d_type.lower() or (sub_type_l and sub_type_l == d_type.lower())", "start with _IGNORED_PARAMETERS_WILDCARDS_START: set[str] = { \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\",", "\"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets for which", "2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache. Only", "{} if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type ] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ]", "logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = { \"HmIPW-DRBL4\": ({1, 5,", "paramset_key == PARAMSET_KEY_MASTER: if device_type not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type] = set() self._relevant_master_paramsets_by_device[device_type].add( channel_no", "self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ][PARAMSET_KEY_MASTER].add(parameter) def get_un_ignore_parameters( self, device_type: str, device_channel: int ) -> dict[str,", "channel_nos and ( device_type_l == d_type.lower() or (sub_type_l and sub_type_l == d_type.lower()) or", "\"load: No file found in %s\", self._storage_folder, ) return try: with open( file=os.path.join(", "4 arguments expected: e.g. 
TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type = device_data[0].lower() channel_no =", ") return paramset_key = data[0] parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER):", "({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] = { EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH,", "{device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = { \"HmIPW-DRBL4\": ({1, 5, 9, 13},", "in device_types] for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = {", "if paramset_key == PARAMSET_KEY_MASTER: if device_type not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type] = set() self._relevant_master_paramsets_by_device[device_type].add(", "elif \":\" in line: # add parameter:paramset_key data = line.split(\":\") if len(data) !=", "!= device_channel: return True if paramset_key == PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get(", "\"HM-LC-Sw1-FM\", \"HM-LC-Sw1PBU-FM\", \"HM-LC-Sw1-Pl-DN-R1\", \"HM-LC-Sw1-PCB\", \"HM-LC-Sw4-DR\", \"HM-SwI-3-FM\", ], \"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\",", "self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] = {} # device_type, channel_no self._relevant_master_paramsets_by_device:", "to un ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.\", line, ) return paramset_key", "\"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To override ignore for HmIP-PCBS } #", "device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get(", "= device_data[0].lower() channel_no = int(device_data[1]) paramset_key = device_data[2] if device_type not in self._un_ignore_parameters_by_device_paramset_key:", "self, device_type: str, sub_type: str | None, device_channel: int, paramset_key: str, parameter: str,", "== PARAMSET_KEY_VALUES: if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if", "TEMPERATURE:VALUES.\", line, ) return paramset_key = data[0] parameter = data[1] if paramset_key in", "\"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\", \"TIME_OF_OPERATION\", \"WOCHENPROGRAMM\", } # Ignore Parameter that end", "{} if device_type_l is not None and device_channel is not None: un_ignore_parameters =", "= set() self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif \":\" in line: # add parameter:paramset_key data", "hm_central.CentralUnit, ): self._central: Final = central self._storage_folder: Final = self._central.central_config.storage_folder # paramset_key, parameter", "channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init() def _init(self) -> None: \"\"\"Init relevant_master_paramsets_by_device", "cache.\", line ) def is_relevant_paramset( self, device_type: str, sub_type: str | None, paramset_key:", "= {} self._init() def _init(self) -> None: \"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\"", "EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, 
\"ACTIVITY_STATE\", \"DIRECTION\", } # Parameters within the VALUES paramset for", "set[str] = { EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, \"ACTIVITY_STATE\", \"DIRECTION\", } #", "True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return", "else None if paramset_key == PARAMSET_KEY_VALUES: return True if device_channel is not None", "we don't create entities. _IGNORED_PARAMETERS: set[str] = { \"AES_KEY\", \"BOOST_TIME\", \"BOOT\", \"BURST_LIMIT_WARNING\", \"CLEAR_WINDOW_OPEN_SYMBOL\",", "device within the VALUES paramset for which we don't create entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str,", "\"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\", \"TIME_OF_OPERATION\", \"WOCHENPROGRAMM\", } # Ignore Parameter that", "if parameter in un_ignore_parameters: return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, )", "'%s' to un ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.\", line, ) return", "PARAMSET_KEY_VALUES, ) from hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE:", "\"DATE_TIME_UNKNOWN\", \"DECISION_VALUE\", \"DEVICE_IN_BOOTLOADER\", \"DEW_POINT_ALARM\", \"EMERGENCY_OPERATION\", \"EXTERNAL_CLOCK\", \"FROST_PROTECTION\", \"HUMIDITY_LIMITER\", \"IDENTIFICATION_MODE_LCD_BACKLIGHT\", \"INCLUSION_UNSUPPORTED_DEVICE\", \"INHIBIT\", \"INSTALL_MODE\", \"LEVEL_COMBINED\",", "str, ) -> bool: \"\"\"Return if parameter is on un_ignore list\"\"\" device_type_l =", "and paramset_key == PARAMSET_KEY_MASTER: for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel", "channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ):", "ignore cache. 4 arguments expected: e.g. 
TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type = device_data[0].lower()", "To override ignore for HmIP-PCBS } # Parameters by device within the VALUES", "device_type not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type] = set() self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif \":\" in", "in self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l ] if parameter in un_ignore_parameters: return True", "data[0] device_data = data[1].split(\":\") if len(device_data) != 3: _LOGGER.warning( \"add_line_to_cache: Could not add", "str, sub_type: str | None, paramset_key: str, device_channel: int, ) -> bool: \"\"\"Return", "device_channel is not None: un_ignore_parameters = ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}) )", "return False if ( parameter in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END)) or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith(", "get_un_ignore_parameters( self, device_type: str, device_channel: int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l", "self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l ] if parameter in un_ignore_parameters: return True if", "for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter in un_ignore_parameters:", "return if not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug( \"load: No file found in", "sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if", "{}).get(paramset_key, set()): return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel,", "data[0] parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else: # add", "_load() -> None: if not check_or_create_directory(self._storage_folder): return if not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ):", "def _load() -> None: if not check_or_create_directory(self._storage_folder): return if not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)", "parameter in self._un_ignore_parameters_general[paramset_key]: return True if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key,", "async def load(self) -> None: \"\"\"Load custom un ignore parameters from disk.\"\"\" def", "None: un_ignore_parameters = ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}) ) for ( paramset_key,", "): return True if ( accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter) ) is not None: if", "hahomematic.central_unit as hm_central from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING,", "device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]] = {} if device_type_l is not None", "21}, 
PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS: set[str] = { EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE,", "({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE), } HIDDEN_PARAMETERS:", "Module about parameter visibility within hahomematic \"\"\" from __future__ import annotations import logging", "= { \"AES_KEY\", \"BOOST_TIME\", \"BOOT\", \"BURST_LIMIT_WARNING\", \"CLEAR_WINDOW_OPEN_SYMBOL\", \"COMBINED_PARAMETER\", \"DATE_TIME_UNKNOWN\", \"DECISION_VALUE\", \"DEVICE_IN_BOOTLOADER\", \"DEW_POINT_ALARM\", \"EMERGENCY_OPERATION\",", "file. \"\"\" try: line = line.strip() if \"@\" in line: # add parameter@devicetype:channel_no:paramset_key", "\"ERROR\"], # HM-Sec-Key* \"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To override ignore for", "return True return False def parameter_is_un_ignored( self, device_type: str, sub_type: str | None,", "device_type.lower() channel_nos, parameter = channels_parameter if device_type_l not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type_l] = set()", "\"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the", "line: # add parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache:", "paramset is relevant.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type else None", "d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l ==", "= sub_type.lower() if sub_type else None if parameter in self._un_ignore_parameters_general[paramset_key]: return True if", "True if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l ] if", "be ignored.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type else None if", "parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add", "cache. 4 arguments expected: e.g. 
TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type = device_data[0].lower() channel_no", "that end with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = { \"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\", \"STATUS\",", "self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if ( parameter in", "set[str]] = { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] = {", "True if paramset_key == PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel,", "\"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\",", "with open( file=os.path.join( self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line", "( device_type_l == d_type.lower() or (sub_type_l and sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower()) ):", "parameter: str, ) -> bool: \"\"\"Return if parameter is on un_ignore list\"\"\" device_type_l", "within the VALUES paramset for which we don't create entities. _IGNORED_PARAMETERS: set[str] =", "sub_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower:", "], # To override ignore for HmIP-PCBS } # Parameters by device within", "paramset_key == PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []):", "in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter in un_ignore_parameters: return True return False def", "# device_type, channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] =", "ignore parameters from disk.\"\"\" def _load() -> None: if not check_or_create_directory(self._storage_folder): return if", "\"\"\"Check if parameter can be ignored.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if", "tuple[set[int], str]] = { \"HmIPW-DRBL4\": ({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13,", "self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l == d_type.lower() or (sub_type_l and", "self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {} for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in", "_LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = { \"HmIPW-DRBL4\":", "# add parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could", "# Ignore Parameter that end with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = { \"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\",", "int] = {\"LOWBAT\": 0} class ParameterVisibilityCache: \"\"\"Cache for parameter visibility.\"\"\" def __init__( self,", "in self._un_ignore_parameters_general.items(): if paramset_key not in un_ignore_parameters: un_ignore_parameters[paramset_key] = set() 
un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters", "if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un", "[\"DIRECTION\", \"ERROR\"], # HM-Sec-Key* \"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ], # To override ignore", "\"add_line_to_cache: Could not add line '%s' to un ignore cache. 2 arguments expected:", "device_types] for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower():", "def get_un_ignore_parameters( self, device_type: str, device_channel: int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\"", "self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() }", "def load(self) -> None: \"\"\"Load custom un ignore parameters from disk.\"\"\" def _load()", "\"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str,", "\"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items():", "channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] = {} #", "-> None: if not check_or_create_directory(self._storage_folder): return if not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug(", "PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES, ) from hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no}", "check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = {", "const\"\"\" for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l = device_type.lower() channel_nos, parameter", ") in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l = device_type.lower() channel_nos, parameter = channels_parameter if device_type_l not", "for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ]", "\"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\",", "add parameter@devicetype:channel_no:paramset_key data = line.split(\"@\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not", "Parameters within the paramsets for which we create entities. 
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] =", "hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES,", "if device_type not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if ( channel_no not in", "parameter is on un_ignore list\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type", "open( file=os.path.join( self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line in", "self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif \":\" in line: # add parameter:paramset_key data = line.split(\":\")", "if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True", "= { \"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\", } # Ignore", "{ \"HmIPW-DRBL4\": ({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE), \"HmIP-DRBLI4\": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE),", "{ \"ADJUSTING\", \"ERR_TTM\", \"ERROR\", \"IDENTIFICATION_MODE_KEY_VISUAL\", \"IDENTIFY_\", \"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters", "# To override ignore for HmIP-PCBS } # Parameters by device within the", "list[str]] = { device_type.lower(): parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } # device_type,", "{ PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] = { parameter: [device_type.lower()", "bool: \"\"\"Return if parameter is on un_ignore list\"\"\" device_type_l = device_type.lower() sub_type_l =", "device_type_l = device_type.lower() channel_nos, parameter = channels_parameter if device_type_l not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type_l]", "\"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\", \"STATUS\"], # HM-Sec-Win* \"HM-Sec-Key\": [\"DIRECTION\",", "\"STATUS\"], # HM-Sec-Win* \"HM-Sec-Key\": [\"DIRECTION\", \"ERROR\"], # HM-Sec-Key* \"HmIP-PCBS-BAT\": [ \"OPERATING_VOLTAGE\", \"LOW_BAT\", ],", "\"PARTY_TIME_START\", \"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\",", "parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else: # add parameter", "] = {} # device_type, channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init() def", "str, dict[int, dict[str, set[str]]] ] = {} # device_type, channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]]", "from hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int],", "self, central: hm_central.CentralUnit, ): self._central: Final = central self._storage_folder: Final = self._central.central_config.storage_folder 
#", "import Final import hahomematic.central_unit as hm_central from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR,", "in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters for device_type, parameters", "self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key].add(parameter) if paramset_key == PARAMSET_KEY_MASTER: if device_type not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type]", "_LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache. 2 arguments", "\"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {\"LOWBAT\": 0} class ParameterVisibilityCache: \"\"\"Cache", ").get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower: un_ignore_parameters =", "list\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type else None if parameter", "and un_ignore_parameters_by_device from file. \"\"\" try: line = line.strip() if \"@\" in line:", "check_or_create_directory(self._storage_folder): return if not os.path.exists( os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug( \"load: No file found", "\"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {\"LOWBAT\": 0} class", "in %s\", self._storage_folder, ) return try: with open( file=os.path.join( self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\",", "dict[str, set[str]] = { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] =", "\"BOOT\", \"BURST_LIMIT_WARNING\", \"CLEAR_WINDOW_OPEN_SYMBOL\", \"COMBINED_PARAMETER\", \"DATE_TIME_UNKNOWN\", \"DECISION_VALUE\", \"DEVICE_IN_BOOTLOADER\", \"DEW_POINT_ALARM\", \"EMERGENCY_OPERATION\", \"EXTERNAL_CLOCK\", \"FROST_PROTECTION\", \"HUMIDITY_LIMITER\", \"IDENTIFICATION_MODE_LCD_BACKLIGHT\",", "[\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\", \"STATUS\"], #", "for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l = device_type.lower() channel_nos, parameter =", ").get(device_channel, {}).get(paramset_key, set()): return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {}", "set()): return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel, {}).get(paramset_key,", "Parameter that end with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = { \"OVERFLOW\", \"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\",", "list[str]] = { parameter: [device_type.lower() for device_type in device_types] for parameter, device_types in", "parameter in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END)) or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith( tuple(self._ignore_parameters_by_device_lower.get(parameter, [])) ) or", "\"COMBINED_PARAMETER\", \"DATE_TIME_UNKNOWN\", \"DECISION_VALUE\", \"DEVICE_IN_BOOTLOADER\", \"DEW_POINT_ALARM\", \"EMERGENCY_OPERATION\", \"EXTERNAL_CLOCK\", \"FROST_PROTECTION\", \"HUMIDITY_LIMITER\", 
\"IDENTIFICATION_MODE_LCD_BACKLIGHT\", \"INCLUSION_UNSUPPORTED_DEVICE\", \"INHIBIT\", \"INSTALL_MODE\",", "sub_type_l = sub_type.lower() if sub_type else None if paramset_key == PARAMSET_KEY_VALUES: return True", "= {} for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[", "{\"LOWBAT\": 0} class ParameterVisibilityCache: \"\"\"Cache for parameter visibility.\"\"\" def __init__( self, central: hm_central.CentralUnit,", "for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and (", "\"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {\"LOWBAT\":", "return True if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True", "paramset_key: str, parameter: str, ) -> bool: \"\"\"Check if parameter can be ignored.\"\"\"", "and ( device_type_l == d_type.lower() or (sub_type_l and sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower())", "Could not add line '%s' to un ignore cache. 2 arguments expected: e.g.", "paramset_key = data[0] parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER): self._un_ignore_parameters_general[paramset_key].add(parameter) else:", "set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] = { parameter: [device_type.lower() for device_type in device_types]", "\"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\", \"TIME_OF_OPERATION\", \"WOCHENPROGRAMM\", }", "str, sub_type: str | None, device_channel: int, paramset_key: str, parameter: str, ) ->", "= self._central.central_config.storage_folder # paramset_key, parameter self._un_ignore_parameters_general: dict[str, set[str]] = { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES:", "return True if paramset_key == PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {}", "_LOGGER.debug( \"load: No file found in %s\", self._storage_folder, ) return try: with open(", "-> bool: \"\"\"Return if parameter is on un_ignore list\"\"\" device_type_l = device_type.lower() sub_type_l", "device_type ] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ] = {} if ( paramset_key not in", "parameter:paramset_key data = line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add", "], \"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\",", "self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter in un_ignore_parameters: return True return False def _add_line_to_cache(self,", "paramset_key == PARAMSET_KEY_MASTER: for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in", "\"ACTIVITY_STATE\", \"DIRECTION\", } # Parameters within the VALUES paramset for which we don't", "\"EXTERNAL_CLOCK\", \"FROST_PROTECTION\", \"HUMIDITY_LIMITER\", \"IDENTIFICATION_MODE_LCD_BACKLIGHT\", \"INCLUSION_UNSUPPORTED_DEVICE\", \"INHIBIT\", 
\"INSTALL_MODE\", \"LEVEL_COMBINED\", \"LEVEL_REAL\", \"OLD_LEVEL\", \"PARTY_SET_POINT_TEMPERATURE\", \"PARTY_TIME_END\", \"PARTY_TIME_START\",", "ignored.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type else None if paramset_key", "cache. Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file. \"\"\" try: line =", "paramset for which we don't create entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"LOWBAT\":", "un ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.\", line, ) return paramset_key =", "self._add_line_to_cache(line) except Exception as ex: _LOGGER.warning( \"load: Could not read unignore file %s\",", "if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key, parameter=parameter, ): return False if ( parameter", "if a paramset is relevant.\"\"\" device_type_l = device_type.lower() sub_type_l = sub_type.lower() if sub_type", "\"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets for which we create", "Parameters by device within the VALUES paramset for which we don't create entities.", "dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]] = {} if", "line, ) return paramset_key = data[0] parameter = data[1] if paramset_key in (PARAMSET_KEY_VALUES,", "not in self._un_ignore_parameters_by_device_paramset_key[ device_type ] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ] = {} if (", "paramset_key: str, device_channel: int, ) -> bool: \"\"\"Return if a paramset is relevant.\"\"\"", "= central self._storage_folder: Final = self._central.central_config.storage_folder # paramset_key, parameter self._un_ignore_parameters_general: dict[str, set[str]] =", "_IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters for device_type, parameters in", "is not None: un_ignore_parameters = ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}) ) for", "Add line to from un ignore file to cache. Add data to relevant_master_paramsets_by_device", "we don't create entities. _IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"LOWBAT\": [ \"HM-LC-Sw1-FM\", \"HM-LC-Sw1PBU-FM\",", "device_type, channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] = {}", "} HIDDEN_PARAMETERS: set[str] = { EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, \"ACTIVITY_STATE\", \"DIRECTION\",", "channel_no ][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key].add(parameter) if paramset_key == PARAMSET_KEY_MASTER: if device_type", "not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {} for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if (", "set[str]]] ] = {} # device_type, channel_no self._relevant_master_paramsets_by_device: dict[str, set[int]] = {} self._init()", "\"PARTY_START\", \"PARTY_STOP\", \"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets for which we", "cache. 
Only one @ expected.\", line, ) return parameter = data[0] device_data =", ") -> bool: \"\"\"Return if a paramset is relevant.\"\"\" device_type_l = device_type.lower() sub_type_l", "int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters: dict[str, set[str]]", "paramset_key, un_ignore_params, ) in self._un_ignore_parameters_general.items(): if paramset_key not in un_ignore_parameters: un_ignore_parameters[paramset_key] = set()", "try: line = line.strip() if \"@\" in line: # add parameter@devicetype:channel_no:paramset_key data =", "self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {} for channel_no in channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not", "str, device_channel: int ) -> dict[str, set[str]]: \"\"\"Return un_ignore_parameters\"\"\" device_type_l = device_type.lower() un_ignore_parameters:", "'%s' to un ignore cache. Only one @ expected.\", line, ) return parameter", "!= 3: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache.", "# Parameters by device within the VALUES paramset for which we don't create", "un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self, device_type: str, sub_type: str | None, device_channel:", "\"STATUS_FLAG\", \"WEEK_PROGRAM\", } # Parameters within the paramsets for which we create entities.", "line, ) return device_type = device_data[0].lower() channel_no = int(device_data[1]) paramset_key = device_data[2] if", "to un ignore cache. Only one @ expected.\", line, ) return parameter =", "\"HM-SwI-3-FM\", ], \"LOW_BAT\": [\"HmIP-BWTH\", \"HmIP-PCBS\"], \"OPERATING_VOLTAGE\": [ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\",", "for which we don't create entities. _IGNORED_PARAMETERS: set[str] = { \"AES_KEY\", \"BOOST_TIME\", \"BOOT\",", "for which we create entities. 
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], #", "HmIP-PCBS } # Parameters by device within the VALUES paramset for which we", "relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l", "= ( self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}) ) for ( paramset_key, un_ignore_params, )", "[\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\", \"ERROR\", \"STATUS\"], # HM-Sec-Win* \"HM-Sec-Key\": [\"DIRECTION\", \"ERROR\"],", "from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER,", "): return False if ( parameter in _IGNORED_PARAMETERS or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END)) or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or", "\"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL:", "= { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] = { parameter:", "set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self, device_type: str, sub_type: str | None,", "%s\", self._storage_folder, ) return try: with open( file=os.path.join( self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING,", "as fptr: for line in fptr.readlines(): self._add_line_to_cache(line) except Exception as ex: _LOGGER.warning( \"load:", "\"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\", \"STATE_UNCERTAIN\", \"SWITCH_POINT_OCCURED\", \"TEMPERATURE_LIMITER\", \"TEMPERATURE_OUT_OF_RANGE\",", "device_data[2] if device_type not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if ( channel_no not", "os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS) ): _LOGGER.debug( \"load: No file found in %s\", self._storage_folder, ) return", "file=os.path.join( self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS, ), mode=\"r\", encoding=DEFAULT_ENCODING, ) as fptr: for line in fptr.readlines():", "not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type_l] = set() if device_type_l not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type_l] =", "= { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\", \"WORKING\",", "not in self._un_ignore_parameters_by_device_paramset_key[ device_type ][channel_no] ): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ][paramset_key] = set() self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no", "): self._un_ignore_parameters_by_device_paramset_key[device_type][ channel_no ] = {} if ( paramset_key not in self._un_ignore_parameters_by_device_paramset_key[ device_type", "to un ignore cache.\", 
line ) def is_relevant_paramset( self, device_type: str, sub_type: str", "device_type_l, {} ).get(device_channel, {}) ) for ( paramset_key, un_ignore_params, ) in self._un_ignore_parameters_general.items(): if", "PARAMSET_KEY_MASTER: for ( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and", "__future__ import annotations import logging import os from typing import Final import hahomematic.central_unit", "[ \"HmIP-BDT\", \"HmIP-BSL\", \"HmIP-BSM\", \"HmIP-BWTH\", \"HmIP-DR\", \"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\",", "( d_type, channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l", "paramset_key = device_data[2] if device_type not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if (", "or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith( tuple(self._ignore_parameters_by_device_lower.get(parameter, [])) ) or sub_type_l in self._ignore_parameters_by_device_lower.get(parameter, []) ):", "line '%s' to un ignore cache. Only one @ expected.\", line, ) return", "from disk.\"\"\" def _load() -> None: if not check_or_create_directory(self._storage_folder): return if not os.path.exists(", "# {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = { \"HmIPW-DRBL4\": ({1, 5, 9,", "PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]] = { parameter: [device_type.lower() for device_type in", "EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES, ) from hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__)", "parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START)) or device_type_l.startswith( tuple(self._ignore_parameters_by_device_lower.get(parameter, [])) ) or sub_type_l in self._ignore_parameters_by_device_lower.get(parameter, []) ): return", "not in self._un_ignore_parameters_by_device_paramset_key: self._un_ignore_parameters_by_device_paramset_key[device_type] = {} if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type", "import check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] =", "entities. _IGNORED_PARAMETERS: set[str] = { \"AES_KEY\", \"BOOST_TIME\", \"BOOT\", \"BURST_LIMIT_WARNING\", \"CLEAR_WINDOW_OPEN_SYMBOL\", \"COMBINED_PARAMETER\", \"DATE_TIME_UNKNOWN\", \"DECISION_VALUE\",", "for ( paramset_key, un_ignore_params, ) in self._un_ignore_parameters_general.items(): if paramset_key not in un_ignore_parameters: un_ignore_parameters[paramset_key]", "in un_ignore_parameters: return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items():", "\"HmIP-FDT\", \"HmIP-FSM\", \"HmIP-MOD-OC8\", \"HmIP-PCBS\", \"HmIP-PDT\", \"HmIP-PS\", \"HmIP-SFD\", ], } _ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] =", "within the VALUES paramset for which we don't create entities. 
_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]]", "{ EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, PARAM_CHANNEL_OPERATION_MODE, \"ACTIVITY_STATE\", \"DIRECTION\", } # Parameters within", "sub_type else None if paramset_key == PARAMSET_KEY_VALUES: if self.parameter_is_un_ignored( device_type=device_type, sub_type=sub_type, device_channel=device_channel, paramset_key=paramset_key,", "-> None: \"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter, )", "data = line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add line", ":= _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter) ) is not None: if accept_channel != device_channel: return True if", "sub_type_l == d_type.lower()) or device_type_l.startswith(d_type.lower()) ): return True return False async def load(self)", "str | None, device_channel: int, paramset_key: str, parameter: str, ) -> bool: \"\"\"Check", "un_ignore_parameters: return True if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items(): if", "!= 2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache.", "\"TIME_OF_OPERATION\", \"WOCHENPROGRAMM\", } # Ignore Parameter that end with _IGNORED_PARAMETERS_WILDCARDS_END: set[str] = {", "\"OVERHEAT\", \"OVERRUN\", \"REPORTING\", \"RESULT\", \"STATUS\", \"SUBMIT\", \"WORKING\", } # Ignore Parameter that start", "parameter visibility.\"\"\" def __init__( self, central: hm_central.CentralUnit, ): self._central: Final = central self._storage_folder:", ") in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter in un_ignore_parameters: return True return False", "un_ignore_parameters: un_ignore_parameters[paramset_key] = set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self, device_type: str, sub_type:", "paramset for which we don't create entities. 
"""
Module about parameter visibility within hahomematic
"""
from __future__ import annotations

import logging
import os
from typing import Final

import hahomematic.central_unit as hm_central
from hahomematic.const import (
    DEFAULT_ENCODING,
    EVENT_CONFIG_PENDING,
    EVENT_ERROR,
    EVENT_STICKY_UN_REACH,
    EVENT_UN_REACH,
    EVENT_UPDATE_PENDING,
    FILE_CUSTOM_UN_IGNORE_PARAMETERS,
    PARAM_CHANNEL_OPERATION_MODE,
    PARAMSET_KEY_MASTER,
    PARAMSET_KEY_VALUES,
)
from hahomematic.helpers import check_or_create_directory

_LOGGER = logging.getLogger(__name__)

# {device_type: channel_no}
_RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = {
    "HmIPW-DRBL4": ({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE),
    "HmIP-DRBLI4": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE),
}

HIDDEN_PARAMETERS: set[str] = {
    EVENT_CONFIG_PENDING,
    EVENT_ERROR,
    EVENT_STICKY_UN_REACH,
    EVENT_UN_REACH,
    EVENT_UPDATE_PENDING,
    PARAM_CHANNEL_OPERATION_MODE,
    "ACTIVITY_STATE",
    "DIRECTION",
}

# Parameters within the VALUES paramset for which we don't create entities.
_IGNORED_PARAMETERS: set[str] = {
    "AES_KEY", "BOOST_TIME", "BOOT", "BURST_LIMIT_WARNING",
    "CLEAR_WINDOW_OPEN_SYMBOL", "COMBINED_PARAMETER", "DATE_TIME_UNKNOWN",
    "DECISION_VALUE", "DEVICE_IN_BOOTLOADER", "DEW_POINT_ALARM",
    "EMERGENCY_OPERATION", "EXTERNAL_CLOCK", "FROST_PROTECTION",
    "HUMIDITY_LIMITER", "IDENTIFICATION_MODE_LCD_BACKLIGHT",
    "INCLUSION_UNSUPPORTED_DEVICE", "INHIBIT", "INSTALL_MODE",
    "LEVEL_COMBINED", "LEVEL_REAL", "OLD_LEVEL", "PARTY_SET_POINT_TEMPERATURE",
    "PARTY_TIME_END", "PARTY_TIME_START", "PROCESS", "QUICK_VETO_TIME",
    "RAMP_STOP", "RELOCK_DELAY", "SECTION", "SELF_CALIBRATION", "SENSOR_ERROR",
    "SET_SYMBOL_FOR_HEATING_PHASE", "SMOKE_DETECTOR_COMMAND", "STATE_UNCERTAIN",
    "SWITCH_POINT_OCCURED", "TEMPERATURE_LIMITER", "TEMPERATURE_OUT_OF_RANGE",
    "TIME_OF_OPERATION", "WOCHENPROGRAMM",
}

# Ignore Parameter that end with
_IGNORED_PARAMETERS_WILDCARDS_END: set[str] = {
    "OVERFLOW", "OVERHEAT", "OVERRUN", "REPORTING", "RESULT", "STATUS",
    "SUBMIT", "WORKING",
}

# Ignore Parameter that start with
_IGNORED_PARAMETERS_WILDCARDS_START: set[str] = {
    "ADJUSTING", "ERR_TTM", "ERROR", "IDENTIFICATION_MODE_KEY_VISUAL",
    "IDENTIFY_", "PARTY_START", "PARTY_STOP", "STATUS_FLAG", "WEEK_PROGRAM",
}

# Parameters within the paramsets for which we create entities.
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
    "DLD": ["ERROR_JAMMED"],  # HmIP-DLD
    "SD": ["SMOKE_DETECTOR_ALARM_STATUS"],  # HmIP-SWSD
    "HM-Sec-Win": ["DIRECTION", "WORKING", "ERROR", "STATUS"],  # HM-Sec-Win*
    "HM-Sec-Key": ["DIRECTION", "ERROR"],  # HM-Sec-Key*
    "HmIP-PCBS-BAT": [
        "OPERATING_VOLTAGE",
        "LOW_BAT",
    ],  # To override ignore for HmIP-PCBS
}

# Parameters by device within the VALUES paramset for which we don't create entities.
_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
    "LOWBAT": [
        "HM-LC-Sw1-FM",
        "HM-LC-Sw1PBU-FM",
        "HM-LC-Sw1-Pl-DN-R1",
        "HM-LC-Sw1-PCB",
        "HM-LC-Sw4-DR",
        "HM-SwI-3-FM",
    ],
    "LOW_BAT": ["HmIP-BWTH", "HmIP-PCBS"],
    "OPERATING_VOLTAGE": [
        "HmIP-BDT", "HmIP-BSL", "HmIP-BSM", "HmIP-BWTH", "HmIP-DR",
        "HmIP-FDT", "HmIP-FSM", "HmIP-MOD-OC8", "HmIP-PCBS", "HmIP-PDT",
        "HmIP-PS", "HmIP-SFD",
    ],
}

_ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {"LOWBAT": 0}


class ParameterVisibilityCache:
    """Cache for parameter visibility."""

    def __init__(
        self,
        central: hm_central.CentralUnit,
    ):
        self._central: Final = central
        self._storage_folder: Final = self._central.central_config.storage_folder

        # paramset_key, parameter
        self._un_ignore_parameters_general: dict[str, set[str]] = {
            PARAMSET_KEY_MASTER: set(),
            PARAMSET_KEY_VALUES: set(),
        }
        self._ignore_parameters_by_device_lower: dict[str, list[str]] = {
            parameter: [device_type.lower() for device_type in device_types]
            for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items()
        }
        self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = {
            device_type.lower(): parameters
            for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items()
        }

        # device_type, channel_no, paramset_key, list[parameter]
        self._un_ignore_parameters_by_device_paramset_key: dict[
            str, dict[int, dict[str, set[str]]]
        ] = {}

        # device_type, channel_no
        self._relevant_master_paramsets_by_device: dict[str, set[int]] = {}
        self._init()

    def _init(self) -> None:
        """Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const"""
        for (
            device_type,
            channels_parameter,
        ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items():
            device_type_l = device_type.lower()
            channel_nos, parameter = channels_parameter
            if device_type_l not in self._relevant_master_paramsets_by_device:
                self._relevant_master_paramsets_by_device[device_type_l] = set()
            if device_type_l not in self._un_ignore_parameters_by_device_paramset_key:
                self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {}
            for channel_no in channel_nos:
                self._relevant_master_paramsets_by_device[device_type_l].add(channel_no)
                if (
                    channel_no
                    not in self._un_ignore_parameters_by_device_paramset_key[device_type_l]
                ):
                    self._un_ignore_parameters_by_device_paramset_key[device_type_l][
                        channel_no
                    ] = {PARAMSET_KEY_MASTER: set()}
                self._un_ignore_parameters_by_device_paramset_key[device_type_l][
                    channel_no
                ][PARAMSET_KEY_MASTER].add(parameter)

    def get_un_ignore_parameters(
        self, device_type: str, device_channel: int
    ) -> dict[str, set[str]]:
        """Return un_ignore_parameters"""
        device_type_l = device_type.lower()
        un_ignore_parameters: dict[str, set[str]] = {}
        if device_type_l is not None and device_channel is not None:
            un_ignore_parameters = self._un_ignore_parameters_by_device_paramset_key.get(
                device_type_l, {}
            ).get(device_channel, {})
        for (
            paramset_key,
            un_ignore_params,
        ) in self._un_ignore_parameters_general.items():
            if paramset_key not in un_ignore_parameters:
                un_ignore_parameters[paramset_key] = set()
            un_ignore_parameters[paramset_key].update(un_ignore_params)
        return un_ignore_parameters

    def ignore_parameter(
        self,
        device_type: str,
        sub_type: str | None,
        device_channel: int,
        paramset_key: str,
        parameter: str,
    ) -> bool:
        """Check if parameter can be ignored."""
        device_type_l = device_type.lower()
        sub_type_l = sub_type.lower() if sub_type else None

        if paramset_key == PARAMSET_KEY_VALUES:
            if self.parameter_is_un_ignored(
                device_type=device_type,
                sub_type=sub_type,
                device_channel=device_channel,
                paramset_key=paramset_key,
                parameter=parameter,
            ):
                return False
            if (
                parameter in _IGNORED_PARAMETERS
                or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END))
                or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START))
                or device_type_l.startswith(
                    tuple(self._ignore_parameters_by_device_lower.get(parameter, []))
                )
                or sub_type_l in self._ignore_parameters_by_device_lower.get(parameter, [])
            ):
                return True
            if (
                accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter)
            ) is not None:
                if accept_channel != device_channel:
                    return True
        if paramset_key == PARAMSET_KEY_MASTER:
            if parameter not in self._un_ignore_parameters_by_device_paramset_key.get(
                device_type_l, {}
            ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []):
                return True

        return False

    def parameter_is_un_ignored(
        self,
        device_type: str,
        sub_type: str | None,
        device_channel: int,
        paramset_key: str,
        parameter: str,
    ) -> bool:
        """Return if parameter is on un_ignore list"""
        device_type_l = device_type.lower()
        sub_type_l = sub_type.lower() if sub_type else None

        if parameter in self._un_ignore_parameters_general[paramset_key]:
            return True
        if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
            device_type_l, {}
        ).get(device_channel, {}).get(paramset_key, set()):
            return True
        if sub_type_l:
            if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
                sub_type_l, {}
            ).get(device_channel, {}).get(paramset_key, set()):
                return True
        if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower:
            un_ignore_parameters = self._un_ignore_parameters_by_device_lower[sub_type_l]
            if parameter in un_ignore_parameters:
                return True
        if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)):
            for (
                device_t,
                un_ignore_parameters,
            ) in self._un_ignore_parameters_by_device_lower.items():
                if device_type_l.startswith(device_t):
                    if parameter in un_ignore_parameters:
                        return True

        return False

    def _add_line_to_cache(self, line: str) -> None:
        """
        Add line to from un ignore file to cache.
        Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file.
        """
        try:
            line = line.strip()
            if "@" in line:
                # add parameter@devicetype:channel_no:paramset_key
                data = line.split("@")
                if len(data) != 2:
                    _LOGGER.warning(
                        "add_line_to_cache: Could not add line '%s' to un ignore cache. "
                        "Only one @ expected.",
                        line,
                    )
                    return
                parameter = data[0]
                device_data = data[1].split(":")
                if len(device_data) != 3:
                    _LOGGER.warning(
                        "add_line_to_cache: Could not add line '%s' to un ignore cache. "
                        "4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.",
                        line,
                    )
                    return
                device_type = device_data[0].lower()
                channel_no = int(device_data[1])
                paramset_key = device_data[2]
                if device_type not in self._un_ignore_parameters_by_device_paramset_key:
                    self._un_ignore_parameters_by_device_paramset_key[device_type] = {}
                if (
                    channel_no
                    not in self._un_ignore_parameters_by_device_paramset_key[device_type]
                ):
                    self._un_ignore_parameters_by_device_paramset_key[device_type][
                        channel_no
                    ] = {}
                if (
                    paramset_key
                    not in self._un_ignore_parameters_by_device_paramset_key[device_type][
                        channel_no
                    ]
                ):
                    self._un_ignore_parameters_by_device_paramset_key[device_type][
                        channel_no
                    ][paramset_key] = set()
                self._un_ignore_parameters_by_device_paramset_key[device_type][
                    channel_no
                ][paramset_key].add(parameter)

                if paramset_key == PARAMSET_KEY_MASTER:
                    if device_type not in self._relevant_master_paramsets_by_device:
                        self._relevant_master_paramsets_by_device[device_type] = set()
                    self._relevant_master_paramsets_by_device[device_type].add(channel_no)
            elif ":" in line:
                # add parameter:paramset_key
                data = line.split(":")
                if len(data) != 2:
                    _LOGGER.warning(
                        "add_line_to_cache: Could not add line '%s' to un ignore cache. "
                        "2 arguments expected: e.g. TEMPERATURE:VALUES.",
                        line,
                    )
                    return
                paramset_key = data[0]
                parameter = data[1]
                if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER):
                    self._un_ignore_parameters_general[paramset_key].add(parameter)
            else:
                # add parameter
                self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line)
        except Exception:
            _LOGGER.warning(
                "add_line_to_cache: Could not add line '%s' to un ignore cache.", line
            )

    def is_relevant_paramset(
        self,
        device_type: str,
        sub_type: str | None,
        paramset_key: str,
        device_channel: int,
    ) -> bool:
        """Return if a paramset is relevant."""
        device_type_l = device_type.lower()
        sub_type_l = sub_type.lower() if sub_type else None
        if paramset_key == PARAMSET_KEY_VALUES:
            return True
        if device_channel is not None and paramset_key == PARAMSET_KEY_MASTER:
            for (
                d_type,
                channel_nos,
            ) in self._relevant_master_paramsets_by_device.items():
                if device_channel in channel_nos and (
                    device_type_l == d_type.lower()
                    or (sub_type_l and sub_type_l == d_type.lower())
                    or device_type_l.startswith(d_type.lower())
                ):
                    return True
        return False

    async def load(self) -> None:
        """Load custom un ignore parameters from disk."""

        def _load() -> None:
            if not check_or_create_directory(self._storage_folder):
                return
            if not os.path.exists(
                os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)
            ):
                _LOGGER.debug(
                    "load: No file found in %s",
                    self._storage_folder,
                )
                return
            try:
                with open(
                    file=os.path.join(
                        self._storage_folder,
                        FILE_CUSTOM_UN_IGNORE_PARAMETERS,
                    ),
                    mode="r",
                    encoding=DEFAULT_ENCODING,
                ) as fptr:
                    for line in fptr.readlines():
                        self._add_line_to_cache(line)
            except Exception as ex:
                _LOGGER.warning(
                    "load: Could not read unignore file %s",
                    ex.args,
                )

        await self._central.async_add_executor_job(_load)
Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file. \"\"\"", "self._un_ignore_parameters_general: dict[str, set[str]] = { PARAMSET_KEY_MASTER: set(), PARAMSET_KEY_VALUES: set(), } self._ignore_parameters_by_device_lower: dict[str, list[str]]", "paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key: dict[ str, dict[int, dict[str, set[str]]] ] = {} # device_type,", "expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type = device_data[0].lower() channel_no = int(device_data[1]) paramset_key", "central: hm_central.CentralUnit, ): self._central: Final = central self._storage_folder: Final = self._central.central_config.storage_folder # paramset_key,", "from const\"\"\" for ( device_type, channels_parameter, ) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items(): device_type_l = device_type.lower() channel_nos,", "True return False async def load(self) -> None: \"\"\"Load custom un ignore parameters", "hahomematic.helpers import check_or_create_directory _LOGGER = logging.getLogger(__name__) # {device_type: channel_no} _RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]]", "# add parameter:paramset_key data = line.split(\":\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could", "import annotations import logging import os from typing import Final import hahomematic.central_unit as", "[device_type.lower() for device_type in device_types] for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str,", "parameters for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items() } # device_type, channel_no, paramset_key, list[parameter] self._un_ignore_parameters_by_device_paramset_key:", "channel_no ] = {PARAMSET_KEY_MASTER: set()} self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ][PARAMSET_KEY_MASTER].add(parameter) def get_un_ignore_parameters( self, device_type: str,", "device_types in _IGNORE_PARAMETERS_BY_DEVICE.items() } self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = { device_type.lower(): parameters for device_type,", "un_ignore_parameters[paramset_key] = set() un_ignore_parameters[paramset_key].update(un_ignore_params) return un_ignore_parameters def ignore_parameter( self, device_type: str, sub_type: str", "parameter = channels_parameter if device_type_l not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type_l] = set() if device_type_l", "self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True return False def parameter_is_un_ignored( self,", "device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)): for ( device_t, un_ignore_parameters, ) in self._un_ignore_parameters_by_device_lower.items(): if device_type_l.startswith(device_t): if parameter in", "to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file. 
\"\"\" try: line = line.strip() if \"@\"", "not in self._relevant_master_paramsets_by_device: self._relevant_master_paramsets_by_device[device_type] = set() self._relevant_master_paramsets_by_device[device_type].add( channel_no ) elif \":\" in line:", ") elif \":\" in line: # add parameter:paramset_key data = line.split(\":\") if len(data)", ") in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l == d_type.lower() or", "return False def _add_line_to_cache(self, line: str) -> None: \"\"\" Add line to from", "in line: # add parameter:paramset_key data = line.split(\":\") if len(data) != 2: _LOGGER.warning(", "_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD", "sub_type_l in self._un_ignore_parameters_by_device_lower: un_ignore_parameters = self._un_ignore_parameters_by_device_lower[ sub_type_l ] if parameter in un_ignore_parameters: return", "_LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to un ignore cache.\", line )", "line ) def is_relevant_paramset( self, device_type: str, sub_type: str | None, paramset_key: str,", "_add_line_to_cache(self, line: str) -> None: \"\"\" Add line to from un ignore file", "self, device_type: str, sub_type: str | None, paramset_key: str, device_channel: int, ) ->", "PARAMSET_KEY_MASTER: if parameter not in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []): return True", "the VALUES paramset for which we don't create entities. _IGNORED_PARAMETERS: set[str] = {", "= sub_type.lower() if sub_type else None if paramset_key == PARAMSET_KEY_VALUES: return True if", "channel_nos, ) in self._relevant_master_paramsets_by_device.items(): if device_channel in channel_nos and ( device_type_l == d_type.lower()", "None: \"\"\" Add line to from un ignore file to cache. 
Add data", "add parameter self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line) except Exception: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to", "= { \"LOWBAT\": [ \"HM-LC-Sw1-FM\", \"HM-LC-Sw1PBU-FM\", \"HM-LC-Sw1-Pl-DN-R1\", \"HM-LC-Sw1-PCB\", \"HM-LC-Sw4-DR\", \"HM-SwI-3-FM\", ], \"LOW_BAT\": [\"HmIP-BWTH\",", "True if parameter in self._un_ignore_parameters_by_device_paramset_key.get( device_type_l, {} ).get(device_channel, {}).get(paramset_key, set()): return True if", "self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ): self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no ]", "if paramset_key == PARAMSET_KEY_VALUES: return True if device_channel is not None and paramset_key", "( DEFAULT_ENCODING, EVENT_CONFIG_PENDING, EVENT_ERROR, EVENT_STICKY_UN_REACH, EVENT_UN_REACH, EVENT_UPDATE_PENDING, FILE_CUSTOM_UN_IGNORE_PARAMETERS, PARAM_CHANNEL_OPERATION_MODE, PARAMSET_KEY_MASTER, PARAMSET_KEY_VALUES, ) from", "int, paramset_key: str, parameter: str, ) -> bool: \"\"\"Check if parameter can be", "un_ignore_parameters: return True return False def _add_line_to_cache(self, line: str) -> None: \"\"\" Add", "line.split(\"@\") if len(data) != 2: _LOGGER.warning( \"add_line_to_cache: Could not add line '%s' to", "\"OLD_LEVEL\", \"PARTY_SET_POINT_TEMPERATURE\", \"PARTY_TIME_END\", \"PARTY_TIME_START\", \"PROCESS\", \"QUICK_VETO_TIME\", \"RAMP_STOP\", \"RELOCK_DELAY\", \"SECTION\", \"SELF_CALIBRATION\", \"SENSOR_ERROR\", \"SET_SYMBOL_FOR_HEATING_PHASE\", \"SMOKE_DETECTOR_COMMAND\",", "un ignore file to cache. Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file.", "\"DEVICE_IN_BOOTLOADER\", \"DEW_POINT_ALARM\", \"EMERGENCY_OPERATION\", \"EXTERNAL_CLOCK\", \"FROST_PROTECTION\", \"HUMIDITY_LIMITER\", \"IDENTIFICATION_MODE_LCD_BACKLIGHT\", \"INCLUSION_UNSUPPORTED_DEVICE\", \"INHIBIT\", \"INSTALL_MODE\", \"LEVEL_COMBINED\", \"LEVEL_REAL\", \"OLD_LEVEL\",", "_init(self) -> None: \"\"\"Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const\"\"\" for ( device_type, channels_parameter,", "to un ignore cache. 4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.\", line, ) return device_type", "channel_nos: self._relevant_master_paramsets_by_device[device_type_l].add(channel_no) if ( channel_no not in self._un_ignore_parameters_by_device_paramset_key[ device_type_l ] ): self._un_ignore_parameters_by_device_paramset_key[device_type_l][ channel_no", "list[str]] = { \"DLD\": [\"ERROR_JAMMED\"], # HmIP-DLD \"SD\": [\"SMOKE_DETECTOR_ALARM_STATUS\"], # HmIP-SWSD \"HM-Sec-Win\": [\"DIRECTION\",", "un_ignore_parameters def ignore_parameter( self, device_type: str, sub_type: str | None, device_channel: int, paramset_key:", "typing import Final import hahomematic.central_unit as hm_central from hahomematic.const import ( DEFAULT_ENCODING, EVENT_CONFIG_PENDING,", "return True if sub_type_l: if parameter in self._un_ignore_parameters_by_device_paramset_key.get( sub_type_l, {} ).get(device_channel, {}).get(paramset_key, set()):", "not None: if accept_channel != device_channel: return True if paramset_key == PARAMSET_KEY_MASTER: if", "for parameter visibility.\"\"\" def __init__( self, central: hm_central.CentralUnit, ): self._central: Final = central", "| None, device_channel: int, paramset_key: str, parameter: str, ) -> bool: \"\"\"Return if" ]
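The un-ignore line formats above can be illustrated stand-alone. The sketch below is not hahomematic's implementation: the function name parse_un_ignore_line, its tuple return value, and the log messages are invented for illustration, and it only parses a line instead of populating the per-device caches.

from __future__ import annotations

import logging

_LOGGER = logging.getLogger(__name__)


def parse_un_ignore_line(line: str) -> tuple[str, str | None, int | None, str] | None:
    """Return (parameter, device_type, channel_no, paramset_key); missing parts are None."""
    line = line.strip()
    if not line:
        return None
    if "@" in line:
        # full form, e.g. "TEMPERATURE@HmIP-BWTH:1:VALUES"
        data = line.split("@")
        device = data[1].split(":") if len(data) == 2 else []
        if len(device) != 3:
            _LOGGER.warning("could not parse '%s': expected PARAMETER@DEVICE:CHANNEL:PARAMSET", line)
            return None
        return data[0], device[0].lower(), int(device[1]), device[2]
    if ":" in line:
        # short form, e.g. "TEMPERATURE:VALUES"
        data = line.split(":")
        if len(data) != 2:
            _LOGGER.warning("could not parse '%s': expected PARAMETER:PARAMSET", line)
            return None
        return data[0], None, None, data[1]
    # bare parameter name: treated as a VALUES-paramset parameter
    return line, None, None, "VALUES"


if __name__ == "__main__":
    for raw in ("TEMPERATURE@HmIP-BWTH:1:VALUES", "TEMPERATURE:VALUES", "LEVEL", "BAD@LINE"):
        print(raw, "->", parse_un_ignore_line(raw))

The real _add_line_to_cache additionally records the MASTER channel in relevant_master_paramsets_by_device, so a single un-ignore line can both expose a parameter and mark its paramset as relevant.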
[ "Client as _Client from httpx import AsyncClient as _AsyncClient from httpx import Response", "min_status_code: int = None, **kwargs) -> bool: \"\"\" Returns a bool of whether", "return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def get_data(self, path: str, key:", "str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool: \"\"\"", "None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url", "-> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "= 'sync', method = 'get') def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "a health check \"\"\" res = await self.async_get(url=path, **kwargs) if min_status_code and max_status_code:", "client_type = 'sync', method = 'head') def patch(self, path: str, **kwargs) -> Union[Response,", "does not get the key, returns None. \"\"\" resp = await self.async_get(url=path, **kwargs)", "'async', method = 'delete') async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs) return self._web @property def aclient(self): if not self._async: self._async", "= self.get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if", "method = 'put') def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "\"\"\" res = self.get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code,", "@property def client(self): if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs) return", "if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary", "\"\", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx", "JSON. If does not get the key, returns None. 
\"\"\" resp = await", "an int Can be used as a health check \"\"\" res = self.get(url=path,", "= await self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path, **kwargs) if self._default_mode: return", "**kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async", "= None self._default_mode = False self.set_configs(base_url = base_url, headers = headers, config =", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp return", "resp, client_type = 'sync', method = 'post') ############################################################################# # Async REST Methods #", "return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [ 'Client',", "= config, async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs) def", "base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny =", "# ############################################################################# def ping(self, path: str, max_status_code: int = 300, min_status_code: int =", "= cls.create_client() return cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async", "'sync', method = 'head') def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "GET request to Path as a LazyCls \"\"\" data = await self.async_get_data(path=path, key=key,", "as a LazyCls \"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if not data:", "'sync', method = 'put') def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "check \"\"\" res = self.get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in", "min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self, path:", "return Response(resp = resp, client_type = 'async', method = 'put') async def async_post(self,", "return _Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str = \"\",", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return", "if _httpx_available: from httpx import Client as _Client from httpx import AsyncClient as", "_AsyncClient, HttpResponse = object, object, object class Client: _web: _Client = None _async:", "= module_name, default_resp = default_resp, **kwargs) def set_configs(self, base_url: str = HttpConfigz.base_url or", "default_resp: bool = False, **kwargs): self.set_configs(base_url = base_url, headers = headers, config =", "self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "get the key, returns None. 
\"\"\" resp = self.get(url=path, **kwargs) return resp.data.get(key, None)", "-> Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if", "= {} self._web = None self._async = None self._default_mode = False self.set_configs(base_url =", "whether response code is great/within range/less than an int Can be used as", "object, object class Client: _web: _Client = None _async: _AsyncClient = None @classmethod", "= resp, client_type = 'async', method = 'head') async def async_patch(self, path: str,", "self.config = None self.async_config = None self._module_name = None self._kwargs = {} self._web", "'post') ############################################################################# # Supplementary Helpful Callers # ############################################################################# def ping(self, path: str, max_status_code:", "ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {},", "self._default_mode: return resp return Response(resp = resp, client_type = 'async', method = 'delete')", "= 'get') def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path,", "resp = self.client.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object, object class", "delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path, **kwargs) if self._default_mode:", "kwargs or self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny", "= 'patch') async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "self._default_mode: return resp return Response(resp = resp, client_type = 'async', method = 'put')", "AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.set_configs(base_url = base_url, headers = headers, config", "module_name, default_resp = default_resp, **kwargs) self._web = None self._async = None @property def", "Callers # ############################################################################# def ping(self, path: str, max_status_code: int = 300, min_status_code: int", "resp = self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "resp = self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str, key: str", "Response(resp = resp, client_type = 'async', method = 'patch') async def async_put(self, path:", "path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool:", ".types import * from .utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if", "**kwargs): self.base_url = base_url or self.base_url self.headers = headers or self.headers self.config =", "import * from .types import * from .utils import convert_to_cls from .base_imports import", "resp, client_type = 'sync', method = 'put') def post(self, path: str, **kwargs) ->", "= configz.httpx_config if 'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] =", "Response(resp = resp, client_type = 'sync', method = 'get') def head(self, path: str,", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode:", 
"self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode:", "if headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def", "# Supplementary Helpful Callers # ############################################################################# def ping(self, path: str, max_status_code: int =", "None) async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:", "None @property def client(self): if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs)", "-> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return", "= False self.set_configs(base_url = base_url, headers = headers, config = config, async_config =", "self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str, key: str = 'data',", "= 'patch') def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path,", "def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs)", "as a LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if not data: return", "key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient =", "resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "async def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path,", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path, **kwargs) if self._default_mode: return resp return", "Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "method = 'post') ############################################################################# # Async REST Methods # ############################################################################# async def async_delete(self,", "if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def get_data(self, path:", "key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get data in", "############################################################################# def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path, **kwargs)", "as a health check \"\"\" res = self.get(url=path, **kwargs) if min_status_code and max_status_code:", "return resp return Response(resp = resp, client_type = 'async', method = 'patch') async", "a GET request to Path as a LazyCls \"\"\" data = self.get_data(path=path, key=key,", "a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config =", "httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if", "DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False,", "= await self.async_get(url=path, **kwargs) if 
min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code))", "from a GET request to Path as a LazyCls \"\"\" data = self.get_data(path=path,", "= 'sync', method = 'patch') def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "_ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in", "module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url", "return resp return Response(resp = resp, client_type = 'async', method = 'head') async", "'async', method = 'head') async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "a health check \"\"\" res = self.get(url=path, **kwargs) if min_status_code and max_status_code: return", "async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\" Expects to", "module_name, default_resp = default_resp, **kwargs) def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url,", "# Async Supplementary Helpful Callers # ############################################################################# async def async_ping(self, path: str, max_status_code:", "self._async = None @property def client(self): if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config,", "get the key, returns None. Returns the data from a GET request to", "Response(resp = resp, client_type = 'sync', method = 'put') def post(self, path: str,", "async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs) def set_configs(self, base_url:", "module_name = module_name, default_resp = default_resp, **kwargs) def set_configs(self, base_url: str = HttpConfigz.base_url", "await self.async_get(url=path, **kwargs) return resp.data.get(key, None) async def async_get_lazycls(self, path: str, key: str", "**kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp", "{}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name", "HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base REST APIs # #############################################################################", "not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful", "used as a health check \"\"\" res = await self.async_get(url=path, **kwargs) if min_status_code", "self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync',", "_ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in", "base_url, headers = headers, config = config, async_config = async_config, module_name = module_name,", "Helpful Callers # ############################################################################# def ping(self, path: str, max_status_code: int = 300, min_status_code:", "APIClient = 
ApiClient __all__ = [ 'Client', 'HttpResponse', 'ApiClient', 'APIClient', '_Client', '_AsyncClient' ]", "def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if", "from .config import * from .types import * from .utils import convert_to_cls from", "str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get data", "import AsyncClient as _AsyncClient from httpx import Response as HttpResponse else: _Client, _AsyncClient,", "return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self, path: str,", "def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs)", "Response(resp = resp, client_type = 'async', method = 'post') ############################################################################# # Supplementary Helpful", "resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__", "= 'async', method = 'post') ############################################################################# # Supplementary Helpful Callers # ############################################################################# def", "annotations from lazy.types import * from lazy.models import BaseCls from .config import *", "False, **kwargs): self.set_configs(base_url = base_url, headers = headers, config = config, async_config =", "= None self._async = None self._default_mode = False self.set_configs(base_url = base_url, headers =", "resp, client_type = 'sync', method = 'delete') def get(self, path: str, **kwargs) ->", "Response(resp = resp, client_type = 'sync', method = 'post') ############################################################################# # Async REST", "\"\"\" resp = self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str, key:", "self.base_url = \"\" self.headers = {} self.config = None self.async_config = None self._module_name", "None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [ 'Client', 'HttpResponse',", "request to Path as a LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if", "Returns the data from a GET request to Path as a LazyCls \"\"\"", "self._default_mode: return resp return Response(resp = resp, client_type = 'async', method = 'patch')", "= 'async', method = 'head') async def async_patch(self, path: str, **kwargs) -> Union[Response,", "= config or self.config self.async_config = async_config or self.async_config self._module_name = module_name or", "Response(resp = resp, client_type = 'async', method = 'head') async def async_patch(self, path:", "head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode:", "self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync',", "_ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config = None self.async_config = None", "# Base REST APIs # ############################################################################# def delete(self, path: str, **kwargs) -> Union[Response,", "if not cls._web: cls._web = cls.create_client() return cls._web @classproperty def async_client(cls) -> 
Type[_AsyncClient]:", "HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "= base_url, **client_config, **kwargs) @classproperty def client(cls) -> Type[_Client]: if not cls._web: cls._web", "None self.async_config = None self._module_name = None self._kwargs = {} self._web = None", "be used as a health check \"\"\" res = self.get(url=path, **kwargs) if min_status_code", "Supplementary Helpful Callers # ############################################################################# async def async_ping(self, path: str, max_status_code: int =", "_AsyncClient from httpx import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse = object,", "= async_config, module_name = module_name, default_resp = default_resp, **kwargs) self._web = None self._async", "config: Dict[str, Any] = None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\"", "= 'async', method = 'put') async def async_post(self, path: str, **kwargs) -> Union[Response,", "headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base REST APIs # ############################################################################# def delete(self,", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'delete')", "\"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if not data: return None return", "headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config,", "set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny", "headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls)", "or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url = base_url or self.base_url self.headers", "'sync', method = 'delete') def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "client_type = 'sync', method = 'post') ############################################################################# # Async REST Methods # #############################################################################", "return resp return Response(resp = resp, client_type = 'sync', method = 'delete') def", "Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config)", "key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# #", "self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {},", "if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self,", "cls.create_client() return cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async =", "'get') async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs()", "resp, client_type = 'async', method = 'delete') async def async_get(self, path: 
str, **kwargs)", "None. \"\"\" resp = self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str,", "return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code", ".utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import", "create_client(cls, base_url: str = \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]:", "**kwargs) self._web = None self._async = None @property def client(self): if not self._web:", "-> DataType: \"\"\" Expects to get data in JSON. If does not get", "= object, object, object class Client: _web: _Client = None _async: _AsyncClient =", "**kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if", "config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async httpx", "self.set_configs(base_url = base_url, headers = headers, config = config, async_config = async_config, module_name", "check \"\"\" res = await self.async_get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code", "Async REST Methods # ############################################################################# async def async_delete(self, path: str, **kwargs) -> Union[Response,", "-> Union[Response, HttpResponse]: resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp return", "AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in kwargs: headers =", "or self.base_url self.headers = headers or self.headers self.config = config or self.config self.async_config", "as _AsyncClient from httpx import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse =", "Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "= await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "Path as a LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if not data:", "_web: _Client = None _async: _AsyncClient = None @classmethod def create_client(cls, base_url: str", "-> Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return", "DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str", "headers = headers, config = config, async_config = async_config, module_name = module_name, default_resp", "= resp, client_type = 'sync', method = 'post') ############################################################################# # Async REST Methods", "await self.async_get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key)", "self._default_mode self._kwargs = kwargs or self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url or", "lazy.types import * from lazy.models import BaseCls from .config import * from .types", "resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "Supplementary Helpful Callers # ############################################################################# def ping(self, path: str, max_status_code: int = 300,", "self.config = config or self.config 
self.async_config = async_config or self.async_config self._module_name = module_name", "config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base REST APIs # ############################################################################# def", "'patch') async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "resp, client_type = 'async', method = 'put') async def async_post(self, path: str, **kwargs)", "-> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return", "return Response(resp = resp, client_type = 'async', method = 'delete') async def async_get(self,", "HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "Type[BaseCls]: \"\"\" Expects to get data in JSON. If does not get the", "**kwargs) return resp.data.get(key, None) async def async_get_lazycls(self, path: str, key: str = 'data',", "as HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object, object class Client: _web:", "return resp.data.get(key, None) async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs)", "= None _async: _AsyncClient = None @classmethod def create_client(cls, base_url: str = \"\",", "config or self.config self.async_config = async_config or self.async_config self._module_name = module_name or self._module_name", "< max_status_code) async def async_get_data(self, path: str, key: str = 'data', **kwargs) ->", "None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz()", "= await self.async_get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name,", "object class Client: _web: _Client = None _async: _AsyncClient = None @classmethod def", "max_status_code) def get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\"", "self.async_config = async_config or self.async_config self._module_name = module_name or self._module_name self._default_mode = default_resp", "or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers =", "self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs) return self._web @property def aclient(self): if not", "= 'delete') async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "'patch') def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs)", "an int Can be used as a health check \"\"\" res = await", "module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.set_configs(base_url =", "resp, client_type = 'sync', method = 'head') def patch(self, path: str, **kwargs) ->", "def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if", "key, returns None. 
Returns the data from a GET request to Path as", "'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url", "int Can be used as a health check \"\"\" res = await self.async_get(url=path,", "False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config = None self.async_config", "Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "Union[Response, HttpResponse]: resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "= 'data', **kwargs) -> DataType: \"\"\" Expects to get data in JSON. If", "data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [", "Response(resp = resp, client_type = 'async', method = 'get') async def async_head(self, path:", "headers=self.headers, **self._kwargs) return self._web @property def aclient(self): if not self._async: self._async = Client.create_async_client(base_url=self.base_url,", "= 'get') async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "< max_status_code) def get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:", "config, async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs) def set_configs(self,", "async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool =", "-> Type[_Client]: if not cls._web: cls._web = cls.create_client() return cls._web @classproperty def async_client(cls)", "str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url = base_url", "self.client.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync',", "response code is great/within range/less than an int Can be used as a", "JSON. If does not get the key, returns None. \"\"\" resp = self.get(url=path,", "resp = await self.async_get(url=path, **kwargs) return resp.data.get(key, None) async def async_get_lazycls(self, path: str,", "= resp, client_type = 'sync', method = 'get') def head(self, path: str, **kwargs)", "used as a health check \"\"\" res = self.get(url=path, **kwargs) if min_status_code and", "# ############################################################################# def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path,", "convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers # ############################################################################# async def", "from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client as _Client", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'post')", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if self._default_mode:", "'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get data in JSON. If does", "If does not get the key, returns None. 
Returns the data from a", "None self._kwargs = {} self._web = None self._async = None self._default_mode = False", "import BaseCls from .config import * from .types import * from .utils import", "resp return Response(resp = resp, client_type = 'sync', method = 'delete') def get(self,", "str = 'data', **kwargs) -> DataType: \"\"\" Expects to get data in JSON.", "be used as a health check \"\"\" res = await self.async_get(url=path, **kwargs) if", "self.async_config = None self._module_name = None self._kwargs = {} self._web = None self._async", "= 'put') def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path,", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode: return", "'head') def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs)", "HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object, object class Client: _web: _Client", "return Response(resp = resp, client_type = 'sync', method = 'delete') def get(self, path:", "'head') async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "'async', method = 'put') async def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "= AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in kwargs: headers", "= HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url = base_url or", "bool = False, **kwargs): self.base_url = base_url or self.base_url self.headers = headers or", "return Response(resp = resp, client_type = 'async', method = 'get') async def async_head(self,", "Expects to get data in JSON. If does not get the key, returns", "does not get the key, returns None. Returns the data from a GET", "path: str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get", "headers or self.headers self.config = config or self.config self.async_config = async_config or self.async_config", "**kwargs) -> DataType: \"\"\" Expects to get data in JSON. 
If does not", "'delete') async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects", "_Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str = \"\", config:", "Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client() return cls._async class ApiClient: def __init__(self,", "= self.get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key)", "default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config", "import annotations from lazy.types import * from lazy.models import BaseCls from .config import", "= {} self.config = None self.async_config = None self._module_name = None self._kwargs =", "resp return Response(resp = resp, client_type = 'async', method = 'get') async def", "self._kwargs = kwargs or self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url,", "module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers # ############################################################################# async def async_ping(self,", "Response(resp = resp, client_type = 'async', method = 'delete') async def async_get(self, path:", "import * from .utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available:", "get data in JSON. If does not get the key, returns None. Returns", "resp return Response(resp = resp, client_type = 'sync', method = 'post') ############################################################################# #", "HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "\"\" self.headers = {} self.config = None self.async_config = None self._module_name = None", "def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.delete(url=path, **kwargs) if", "from httpx import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object,", "resp return Response(resp = resp, client_type = 'async', method = 'patch') async def", "key, returns None. 
\"\"\" resp = await self.async_get(url=path, **kwargs) return resp.data.get(key, None) async", "HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "= None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs() configz", "Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\"", "headers, config = config, async_config = async_config, module_name = module_name, default_resp = default_resp,", "async_config, module_name = module_name, default_resp = default_resp, **kwargs) self._web = None self._async =", "cls._async = cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url", "from .types import * from .utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs", "= 'post') ############################################################################# # Async REST Methods # ############################################################################# async def async_delete(self, path:", "bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config =", "self._default_mode: return resp return Response(resp = resp, client_type = 'async', method = 'head')", "self.async_get(url=path, **kwargs) return resp.data.get(key, None) async def async_get_lazycls(self, path: str, key: str =", "'sync', method = 'post') ############################################################################# # Async REST Methods # ############################################################################# async def", "None, **kwargs) -> bool: \"\"\" Returns a bool of whether response code is", "min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code >", "resp, client_type = 'async', method = 'post') ############################################################################# # Supplementary Helpful Callers #", "# Async REST Methods # ############################################################################# async def async_delete(self, path: str, **kwargs) ->", "self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs) return self._web @property def aclient(self): if", "get the key, returns None. \"\"\" resp = await self.async_get(url=path, **kwargs) return resp.data.get(key,", "HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "httpx import Client as _Client from httpx import AsyncClient as _AsyncClient from httpx", "a LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if not data: return None", "the key, returns None. 
\"\"\" resp = await self.async_get(url=path, **kwargs) return resp.data.get(key, None)", "AsyncClient as _AsyncClient from httpx import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse", "than an int Can be used as a health check \"\"\" res =", "self._async = None self._default_mode = False self.set_configs(base_url = base_url, headers = headers, config", "default_resp = default_resp, **kwargs) self._web = None self._async = None @property def client(self):", "self._default_mode: return resp return Response(resp = resp, client_type = 'async', method = 'post')", "Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers'", "LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if not data: return None return", "headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _Client(base_url = base_url, **client_config,", "min_status_code) return bool(res.status_code < max_status_code) def get_data(self, path: str, key: str = 'data',", "= False, **kwargs): self.base_url = base_url or self.base_url self.headers = headers or self.headers", "None. \"\"\" resp = await self.async_get(url=path, **kwargs) return resp.data.get(key, None) async def async_get_lazycls(self,", "Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config", "return Response(resp = resp, client_type = 'sync', method = 'put') def post(self, path:", "# ############################################################################# async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\" Expects to", "base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str = \"\", config: Dict[str, Any]", "None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers # #############################################################################", "* from .utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from", "not get the key, returns None. 
Returns the data from a GET request", "in range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code)", "**self._kwargs) return self._async ############################################################################# # Base REST APIs # ############################################################################# def delete(self, path:", "client_type = 'sync', method = 'patch') def put(self, path: str, **kwargs) -> Union[Response,", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if self._default_mode:", "**kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str, key: str = 'data', **kwargs)", "return resp return Response(resp = resp, client_type = 'async', method = 'delete') async", "an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config =", "HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers", "= Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base REST APIs #", "Can be used as a health check \"\"\" res = self.get(url=path, **kwargs) if", "or self._default_mode self._kwargs = kwargs or self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url", "async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path,", "def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs) if", "############################################################################# async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "def ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs)", "resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "False, **kwargs): self.base_url = base_url or self.base_url self.headers = headers or self.headers self.config", "= default_resp or self._default_mode self._kwargs = kwargs or self._kwargs def reset_clients(self, base_url: str", "async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if", "= resp, client_type = 'async', method = 'get') async def async_head(self, path: str,", "300, min_status_code: int = None, **kwargs) -> bool: \"\"\" Returns a bool of", "a GET request to Path as a LazyCls \"\"\" data = await self.async_get_data(path=path,", "res = self.get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code))", "return Response(resp = resp, client_type = 'async', method = 'head') async def async_patch(self,", "'delete') def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs)", "Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs()", "**client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str = \"\", config: Dict[str, Any] =", "return resp return Response(resp = resp, client_type = 'async', method = 'post') 
#############################################################################", "base_url: str = \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\"", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode: return resp return", "= default_resp, **kwargs) def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny", "= await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "None self._module_name = None self._kwargs = {} self._web = None self._async = None", "min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def get_data(self, path: str,", "kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _Client(base_url = base_url,", "async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs) self._web = None", ".config import * from .types import * from .utils import convert_to_cls from .base_imports", "= resp, client_type = 'async', method = 'patch') async def async_put(self, path: str,", "'put') def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs)", "str = \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates", "bool: \"\"\" Returns a bool of whether response code is great/within range/less than", "httpx import AsyncClient as _AsyncClient from httpx import Response as HttpResponse else: _Client,", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode: return", "= resp, client_type = 'async', method = 'delete') async def async_get(self, path: str,", "code is great/within range/less than an int Can be used as a health", "self.headers self.config = config or self.config self.async_config = async_config or self.async_config self._module_name =", "= async_config, module_name = module_name, default_resp = default_resp, **kwargs) def set_configs(self, base_url: str", "= None self._kwargs = {} self._web = None self._async = None self._default_mode =", "############################################################################# def ping(self, path: str, max_status_code: int = 300, min_status_code: int = None,", "**kwargs) -> Type[BaseCls]: \"\"\" Expects to get data in JSON. If does not", "Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers'", "data in JSON. If does not get the key, returns None. Returns the", "= 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get data in JSON. 
If", "a LazyCls \"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if not data: return", "= resp, client_type = 'sync', method = 'delete') def get(self, path: str, **kwargs)", "= resp, client_type = 'async', method = 'put') async def async_post(self, path: str,", "############################################################################# # Supplementary Helpful Callers # ############################################################################# def ping(self, path: str, max_status_code: int", "\"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config: configz.update_config(**config) client_config", "self.headers = headers or self.headers self.config = config or self.config self.async_config = async_config", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return", "def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config:", "return resp return Response(resp = resp, client_type = 'sync', method = 'patch') def", "range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def", "_async: _AsyncClient = None @classmethod def create_client(cls, base_url: str = \"\", config: Dict[str,", "Response as HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object, object class Client:", "async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path,", "cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url,", "async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client() return cls._async class ApiClient:", "import * from lazy.models import BaseCls from .config import * from .types import", "= base_url, headers = headers, config = config, async_config = async_config, module_name =", "max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool: \"\"\" Returns", "from a GET request to Path as a LazyCls \"\"\" data = await", "import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client as _Client from httpx", "= self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path: str, key: str =", "Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "REST Methods # ############################################################################# async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def get_data(self, path: str, key: str", "= resp, client_type = 'async', method = 'post') ############################################################################# # Supplementary Helpful Callers", "return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers # ############################################################################# async", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode: return resp return", "int = 300, min_status_code: int = None, **kwargs) -> bool: \"\"\" 
Returns a", "str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to get data in JSON.", "**kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method", "from httpx import Client as _Client from httpx import AsyncClient as _AsyncClient from", "self._module_name = None self._kwargs = {} self._web = None self._async = None self._default_mode", "configz = HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in kwargs:", "default_resp, **kwargs) self._web = None self._async = None @property def client(self): if not", "resp = self.client.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "def aclient(self): if not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async", "None self._default_mode = False self.set_configs(base_url = base_url, headers = headers, config = config,", "Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "<reponame>trisongz/lazycls from __future__ import annotations from lazy.types import * from lazy.models import BaseCls", "module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url =", "= self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "cls._web: cls._web = cls.create_client() return cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if not", "data from a GET request to Path as a LazyCls \"\"\" data =", "self.config self.async_config = async_config or self.async_config self._module_name = module_name or self._module_name self._default_mode =", "does not get the key, returns None. \"\"\" resp = self.get(url=path, **kwargs) return", "reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny", "async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp return", "return resp return Response(resp = resp, client_type = 'sync', method = 'head') def", "= None @classmethod def create_client(cls, base_url: str = \"\", config: Dict[str, Any] =", "config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or", "= 'post') ############################################################################# # Supplementary Helpful Callers # ############################################################################# def ping(self, path: str,", "self.get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) #############################################################################", "def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config:", "from lazy.types import * from lazy.models import BaseCls from .config import * from", "None. 
Returns the data from a GET request to Path as a LazyCls", "HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in kwargs: headers =", "async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.put(url=path,", "**kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code: return", "client_config['headers'] = headers return _Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url:", "await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "**kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config = None self.async_config =", "def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client() return cls._async class", "= {}, config: DictAny = None, async_config: DictAny = None, module_name: str =", "method = 'get') def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "**kwargs) -> bool: \"\"\" Returns a bool of whether response code is great/within", "from __future__ import annotations from lazy.types import * from lazy.models import BaseCls from", "def client(cls) -> Type[_Client]: if not cls._web: cls._web = cls.create_client() return cls._web @classproperty", "= 'sync', method = 'put') def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "from httpx import AsyncClient as _AsyncClient from httpx import Response as HttpResponse else:", "Methods # ############################################################################# async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "object, object, object class Client: _web: _Client = None _async: _AsyncClient = None", "LazyCls \"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if not data: return None", "-> Union[Response, HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return", "'put') async def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await", "**kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient", "= None @property def client(self): if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers,", "self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "not cls._async: cls._async = cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url: str", "return Response(resp = resp, client_type = 'sync', method = 'patch') def put(self, path:", "client_type = 'async', method = 'get') async def async_head(self, path: str, **kwargs) ->", "-> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return", "create_async_client(cls, base_url: str = \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]:", "**kwargs) -> Union[Response, 
HttpResponse]: resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp", "\"\", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async", "client_type = 'sync', method = 'put') def post(self, path: str, **kwargs) -> Union[Response,", "kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty", "= 'head') async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "bool of whether response code is great/within range/less than an int Can be", "HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "resp, client_type = 'async', method = 'patch') async def async_put(self, path: str, **kwargs)", "headers: client_config['headers'] = headers return _Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls,", "bool = False, **kwargs): self.set_configs(base_url = base_url, headers = headers, config = config,", "the key, returns None. Returns the data from a GET request to Path", "str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None,", "Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config:", "str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url =", "**kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp", "HttpResponse = object, object, object class Client: _web: _Client = None _async: _AsyncClient", "if not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# #", "await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "= 300, min_status_code: int = None, **kwargs) -> bool: \"\"\" Returns a bool", "= resp, client_type = 'sync', method = 'put') def post(self, path: str, **kwargs)", "import Client as _Client from httpx import AsyncClient as _AsyncClient from httpx import", "not cls._web: cls._web = cls.create_client() return cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if", "Can be used as a health check \"\"\" res = await self.async_get(url=path, **kwargs)", "'get') def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs)", "def get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\" Expects", "= self.client.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "= None self.async_config = None self._module_name = None self._kwargs = {} self._web =", "**kwargs): self.set_configs(base_url = base_url, headers = headers, config = config, async_config = async_config,", "@classproperty def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client() return cls._async", "resp, client_type = 'sync', method = 'get') def head(self, path: str, **kwargs) ->", "None self._async = None self._default_mode = False self.set_configs(base_url = base_url, headers = headers,", "**kwargs) if self._default_mode: return resp 
return Response(resp = resp, client_type = 'async', method", "**kwargs) @classmethod def create_async_client(cls, base_url: str = \"\", config: Dict[str, Any] = None,", "= None, **kwargs) -> bool: \"\"\" Returns a bool of whether response code", "__future__ import annotations from lazy.types import * from lazy.models import BaseCls from .config", "cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client() return", "default_resp: bool = False, **kwargs): self.base_url = base_url or self.base_url self.headers = headers", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'head')", "client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls) ->", "= None self._async = None @property def client(self): if not self._web: self._web =", "max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code > min_status_code) return", "return resp.data.get(key, None) def get_lazycls(self, path: str, key: str = 'data', **kwargs) ->", "JSON. If does not get the key, returns None. Returns the data from", "data = self.get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data, module_name=self._module_name,", "Path as a LazyCls \"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if not", "return cls._web @classproperty def async_client(cls) -> Type[_AsyncClient]: if not cls._async: cls._async = cls.create_async_client()", "class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny =", "= HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config:", "= HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\"", "= module_name, default_resp = default_resp, **kwargs) self._web = None self._async = None @property", "the key, returns None. \"\"\" resp = self.get(url=path, **kwargs) return resp.data.get(key, None) def", "client_type = 'async', method = 'delete') async def async_get(self, path: str, **kwargs) ->", "resp return Response(resp = resp, client_type = 'sync', method = 'get') def head(self,", "= await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.patch(url=path, **kwargs) if self._default_mode:", "None @classmethod def create_client(cls, base_url: str = \"\", config: Dict[str, Any] = None,", "module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [ 'Client', 'HttpResponse', 'ApiClient', 'APIClient', '_Client',", "\"\"\" Expects to get data in JSON. 
If does not get the key,", "httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if", "base_url or self.base_url self.headers = headers or self.headers self.config = config or self.config", "convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [ 'Client', 'HttpResponse', 'ApiClient', 'APIClient',", "_Client = None _async: _AsyncClient = None @classmethod def create_client(cls, base_url: str =", "default_resp = default_resp, **kwargs) def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers:", "= \"\" self.headers = {} self.config = None self.async_config = None self._module_name =", "cls._async: cls._async = cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url: str =", "self._module_name self._default_mode = default_resp or self._default_mode self._kwargs = kwargs or self._kwargs def reset_clients(self,", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return", "> min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self, path: str, key: str", "async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\" Expects to", "None) def get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\"", "return self._async ############################################################################# # Base REST APIs # ############################################################################# def delete(self, path: str,", "_ensure_api_reqs if _httpx_available: from httpx import Client as _Client from httpx import AsyncClient", "Union[Response, HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "= headers or self.headers self.config = config or self.config self.async_config = async_config or", "return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) ############################################################################# # Async Supplementary Helpful Callers #", "await self.async_get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if", "AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url = base_url or self.base_url self.headers =", "async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if", "return self._web @property def aclient(self): if not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers,", "= headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls) -> Type[_Client]:", "default_resp or self._default_mode self._kwargs = kwargs or self._kwargs def reset_clients(self, base_url: str =", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'patch')", "from .utils import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx", "self.client.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync',", "return resp return Response(resp = resp, client_type = 'async', method = 
'get') async", "return Response(resp = resp, client_type = 'async', method = 'post') ############################################################################# # Supplementary", "-> Type[BaseCls]: \"\"\" Expects to get data in JSON. If does not get", "return bool(res.status_code < max_status_code) async def async_get_data(self, path: str, key: str = 'data',", "health check \"\"\" res = await self.async_get(url=path, **kwargs) if min_status_code and max_status_code: return", "self._web @property def aclient(self): if not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs)", "__init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny", "async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if", "to Path as a LazyCls \"\"\" data = await self.async_get_data(path=path, key=key, **kwargs) if", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if self._default_mode: return", "return Response(resp = resp, client_type = 'sync', method = 'get') def head(self, path:", "_AsyncClient = None @classmethod def create_client(cls, base_url: str = \"\", config: Dict[str, Any]", "{} self._web = None self._async = None self._default_mode = False self.set_configs(base_url = base_url,", "-> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "if self._default_mode: return resp return Response(resp = resp, client_type = 'async', method =", "not get the key, returns None. \"\"\" resp = await self.async_get(url=path, **kwargs) return", "method = 'head') async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "max_status_code) async def async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:", "resp.data.get(key, None) async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) ->", ".base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client as _Client from", "= 'delete') def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path,", "return bool(res.status_code < max_status_code) def get_data(self, path: str, key: str = 'data', **kwargs)", "= False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {} self.config = None", "'async', method = 'patch') async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "method = 'patch') async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "_Client, _AsyncClient, HttpResponse = object, object, object class Client: _web: _Client = None", "############################################################################# # Async Supplementary Helpful Callers # ############################################################################# async def async_ping(self, path: str,", "self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base REST", "**kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs) if self._default_mode: return resp return", "self.async_get_data(path=path, key=key, **kwargs) if not 
data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient", "'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _Client(base_url", "kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url,", "self.base_url self.headers = headers or self.headers self.config = config or self.config self.async_config =", "to get data in JSON. If does not get the key, returns None.", "-> Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "Any] = None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz", "method = 'patch') def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "return cls._async class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers:", "not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs) return self._async ############################################################################# # Base", "def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs)", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode: return", "= HttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config if 'headers' in kwargs: headers", "@classmethod def create_async_client(cls, base_url: str = \"\", config: Dict[str, Any] = None, **kwargs)", "headers return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls) -> Type[_Client]: if", "resp return Response(resp = resp, client_type = 'async', method = 'post') ############################################################################# #", "def async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\" Expects", "config, async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs) self._web =", "self._module_name = module_name or self._module_name self._default_mode = default_resp or self._default_mode self._kwargs = kwargs", "self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "self.get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code:", "**kwargs) -> Type[_AsyncClient]: \"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz()", "Creates an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config", "'sync', method = 'patch') def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "= await self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "self.base_url = base_url or self.base_url self.headers = headers or self.headers self.config = config", "resp return Response(resp = resp, client_type = 'sync', method = 'head') def patch(self,", "not data: return None return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ =", "= 'head') def 
patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path,", "= headers, config = config, async_config = async_config, module_name = module_name, default_resp =", "_AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls) -> Type[_Client]: if not cls._web:", "= kwargs or self._kwargs def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers:", "\"\"\" data = self.get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data,", "= cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or", "Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "resp = await self.aclient.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self, path: str, key: str =", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'put')", "str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp", "resp return Response(resp = resp, client_type = 'sync', method = 'patch') def put(self,", "Async Supplementary Helpful Callers # ############################################################################# async def async_ping(self, path: str, max_status_code: int", "resp.data.get(key, None) def get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:", "in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url =", "a bool of whether response code is great/within range/less than an int Can", "returns None. 
\"\"\" resp = self.get(url=path, **kwargs) return resp.data.get(key, None) def get_lazycls(self, path:", "############################################################################# async def async_ping(self, path: str, max_status_code: int = 300, min_status_code: int =", "client_type = 'async', method = 'post') ############################################################################# # Supplementary Helpful Callers # #############################################################################", "async def async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType: \"\"\"", "\"\"\" Creates an async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config)", "import convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client", "APIs # ############################################################################# def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "async httpx Client\"\"\" _ensure_api_reqs() configz = AsyncHttpConfigz() if config: configz.update_config(**config) client_config = configz.httpx_config", "and max_status_code: return bool(res.status_code in range(min_status_code, max_status_code)) if min_status_code: return bool(res.status_code > min_status_code)", "post(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode:", "data = await self.async_get_data(path=path, key=key, **kwargs) if not data: return None return convert_to_cls(resp=data,", "configz.httpx_config if 'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers", "async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.patch(url=path,", "client(self): if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs) return self._web @property", "= 'async', method = 'delete') async def async_get(self, path: str, **kwargs) -> Union[Response,", "str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode: return resp", "self._async ############################################################################# # Base REST APIs # ############################################################################# def delete(self, path: str, **kwargs)", "self.aclient.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs) if self._default_mode: return", "self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method = 'get')", "Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp", "Union[Response, HttpResponse]: resp = self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp =", "_httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client as _Client from httpx import", "Response(resp = resp, client_type = 'sync', method = 'head') def patch(self, path: str,", "# ############################################################################# async def async_ping(self, path: str, max_status_code: int = 300, min_status_code: int", "AsyncHttpConfigz.module_name, 
default_resp: bool = False, **kwargs): _ensure_api_reqs() self.base_url = \"\" self.headers = {}", "self.client.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'sync',", "client_type = 'async', method = 'head') async def async_patch(self, path: str, **kwargs) ->", "convert_to_cls from .base_imports import _httpx_available, _ensure_api_reqs if _httpx_available: from httpx import Client as", "if not cls._async: cls._async = cls.create_async_client() return cls._async class ApiClient: def __init__(self, base_url:", "= False, **kwargs): self.set_configs(base_url = base_url, headers = headers, config = config, async_config", "############################################################################# # Base REST APIs # ############################################################################# def delete(self, path: str, **kwargs) ->", "if self._default_mode: return resp return Response(resp = resp, client_type = 'sync', method =", "from lazy.models import BaseCls from .config import * from .types import * from", "Client: _web: _Client = None _async: _AsyncClient = None @classmethod def create_client(cls, base_url:", "self._web = None self._async = None @property def client(self): if not self._web: self._web", "= \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync", "bool(res.status_code < max_status_code) async def async_get_data(self, path: str, key: str = 'data', **kwargs)", "resp return Response(resp = resp, client_type = 'sync', method = 'put') def post(self,", "HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.base_url = base_url or self.base_url", "method = 'head') def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config:", "cls._async class ApiClient: def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny", "return _AsyncClient(base_url = base_url, **client_config, **kwargs) @classproperty def client(cls) -> Type[_Client]: if not", "method = 'delete') def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp =", "path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.head(url=path, **kwargs) if self._default_mode:", "resp = self.client.put(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "range/less than an int Can be used as a health check \"\"\" res", "return resp return Response(resp = resp, client_type = 'sync', method = 'post') #############################################################################", "**kwargs) @classproperty def client(cls) -> Type[_Client]: if not cls._web: cls._web = cls.create_client() return", "= kwargs.pop('headers') if headers: client_config['headers'] = headers return _AsyncClient(base_url = base_url, **client_config, **kwargs)", "str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode: return resp", "-> bool: \"\"\" Returns a bool of whether response code is great/within range/less", "bool(res.status_code < max_status_code) def get_data(self, path: str, key: str = 'data', **kwargs) ->", "AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None,", "str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.patch(url=path, 
**kwargs) if self._default_mode: return resp", "method = 'post') ############################################################################# # Supplementary Helpful Callers # ############################################################################# def ping(self, path:", "= None, **kwargs) -> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz =", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.get(url=path, **kwargs) if self._default_mode: return", "return Response(resp = resp, client_type = 'async', method = 'patch') async def async_put(self,", "\"\"\" resp = await self.async_get(url=path, **kwargs) return resp.data.get(key, None) async def async_get_lazycls(self, path:", "return convert_to_cls(resp=data, module_name=self._module_name, base_key=key) APIClient = ApiClient __all__ = [ 'Client', 'HttpResponse', 'ApiClient',", "Response(resp = resp, client_type = 'sync', method = 'delete') def get(self, path: str,", "resp = self.client.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) async def async_get_data(self, path: str, key:", "base_key=key) APIClient = ApiClient __all__ = [ 'Client', 'HttpResponse', 'ApiClient', 'APIClient', '_Client', '_AsyncClient'", "async_config or self.async_config self._module_name = module_name or self._module_name self._default_mode = default_resp or self._default_mode", "None _async: _AsyncClient = None @classmethod def create_client(cls, base_url: str = \"\", config:", "= base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str = \"\", config: Dict[str,", "or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.set_configs(base_url = base_url, headers = headers,", "self.aclient.post(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type = 'async',", "'data', **kwargs) -> DataType: \"\"\" Expects to get data in JSON. 
If does", "as _Client from httpx import AsyncClient as _AsyncClient from httpx import Response as", "= \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]: \"\"\" Creates an", "@classmethod def create_client(cls, base_url: str = \"\", config: Dict[str, Any] = None, **kwargs)", "HttpResponse]: resp = self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path, **kwargs) if self._default_mode: return", "resp return Response(resp = resp, client_type = 'async', method = 'head') async def", "resp = await self.aclient.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "Returns a bool of whether response code is great/within range/less than an int", "lazy.models import BaseCls from .config import * from .types import * from .utils", "async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.delete(url=path,", "def create_client(cls, base_url: str = \"\", config: Dict[str, Any] = None, **kwargs) ->", "\"\"\" res = await self.async_get(url=path, **kwargs) if min_status_code and max_status_code: return bool(res.status_code in", "-> Type[_Client]: \"\"\"Creates a Sync httpx Client\"\"\" _ensure_api_reqs() configz = HttpConfigz() if config:", "= self.client.patch(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.put(url=path, **kwargs) if self._default_mode:", "str, **kwargs) -> Union[Response, HttpResponse]: resp = self.client.post(url=path, **kwargs) if self._default_mode: return resp", "_Client from httpx import AsyncClient as _AsyncClient from httpx import Response as HttpResponse", "httpx import Response as HttpResponse else: _Client, _AsyncClient, HttpResponse = object, object, object", "str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs): self.set_configs(base_url = base_url,", "**kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.post(url=path, **kwargs) if self._default_mode: return resp", "= await self.aclient.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type", "Helpful Callers # ############################################################################# async def async_ping(self, path: str, max_status_code: int = 300,", "str = \"\", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]: \"\"\"Creates a", "if 'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers'] = headers return", "client_config = configz.httpx_config if 'headers' in kwargs: headers = kwargs.pop('headers') if headers: client_config['headers']", "get data in JSON. If does not get the key, returns None. \"\"\"", "in JSON. If does not get the key, returns None. 
Returns the data", "max_status_code)) if min_status_code: return bool(res.status_code > min_status_code) return bool(res.status_code < max_status_code) def get_data(self,", "resp, client_type = 'async', method = 'head') async def async_patch(self, path: str, **kwargs)", "'sync', method = 'get') def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp", "async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]: \"\"\"", "headers return _Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str =", "resp return Response(resp = resp, client_type = 'async', method = 'delete') async def", "resp = await self.aclient.head(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]: resp = await self.aclient.put(url=path, **kwargs) if", "returns None. Returns the data from a GET request to Path as a", "return Response(resp = resp, client_type = 'sync', method = 'head') def patch(self, path:", "= base_url or self.base_url self.headers = headers or self.headers self.config = config or", "HttpResponse]: resp = self.client.get(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp,", "'async', method = 'post') ############################################################################# # Supplementary Helpful Callers # ############################################################################# def ping(self,", "to Path as a LazyCls \"\"\" data = self.get_data(path=path, key=key, **kwargs) if not", "@classproperty def client(cls) -> Type[_Client]: if not cls._web: cls._web = cls.create_client() return cls._web", "= None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp:", "= async_config or self.async_config self._module_name = module_name or self._module_name self._default_mode = default_resp or", "HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny", "None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool", "async_ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) ->", "'async', method = 'get') async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:", "= self.client.delete(url=path, **kwargs) if self._default_mode: return resp return Response(resp = resp, client_type =", "base_key=key) ############################################################################# # Async Supplementary Helpful Callers # ############################################################################# async def async_ping(self, path:", "{} self.config = None self.async_config = None self._module_name = None self._kwargs = {}", "= headers return _Client(base_url = base_url, **client_config, **kwargs) @classmethod def create_async_client(cls, base_url: str", "great/within range/less than an int Can be used as a health check \"\"\"", "the data from a GET request to Path as a LazyCls \"\"\" data", "BaseCls from .config import * from .types import * from .utils import convert_to_cls", "= kwargs.pop('headers') if headers: client_config['headers'] = headers return _Client(base_url = base_url, **client_config, **kwargs)", "client_type = 'async', method = 'patch') async def async_put(self, path: str, **kwargs) ->", "= None, 
"""Lazily constructed sync/async httpx clients plus a small REST convenience wrapper (ApiClient)."""
from lazy.models import BaseCls
from .config import *
from .types import *
from .utils import convert_to_cls
from .base_imports import _httpx_available, _ensure_api_reqs

if _httpx_available:
    from httpx import Client as _Client
    from httpx import AsyncClient as _AsyncClient
    from httpx import Response as HttpResponse
else:
    _Client, _AsyncClient, HttpResponse = object, object, object


class Client:
    _web: _Client = None
    _async: _AsyncClient = None

    @classmethod
    def create_client(cls, base_url: str = "", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]:
        """Creates a Sync httpx Client"""
        _ensure_api_reqs()
        configz = HttpConfigz()
        if config: configz.update_config(**config)
        client_config = configz.httpx_config
        if 'headers' in kwargs:
            headers = kwargs.pop('headers')
            if headers: client_config['headers'] = headers
        return _Client(base_url=base_url, **client_config, **kwargs)

    @classmethod
    def create_async_client(cls, base_url: str = "", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]:
        """Creates an Async httpx Client"""
        _ensure_api_reqs()
        configz = AsyncHttpConfigz()
        if config: configz.update_config(**config)
        client_config = configz.httpx_config
        if 'headers' in kwargs:
            headers = kwargs.pop('headers')
            if headers: client_config['headers'] = headers
        return _AsyncClient(base_url=base_url, **client_config, **kwargs)

    @classproperty
    def client(cls) -> Type[_Client]:
        if not cls._web: cls._web = cls.create_client()
        return cls._web

    @classproperty
    def async_client(cls) -> Type[_AsyncClient]:
        if not cls._async: cls._async = cls.create_async_client()
        return cls._async


class ApiClient:
    def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
        self.base_url = None
        self.headers = {}
        self.config = None
        self.async_config = None
        self._module_name = None
        self._kwargs = {}
        self._web = None
        self._async = None
        self._default_mode = False
        self.set_configs(base_url=base_url, headers=headers, config=config, async_config=async_config, module_name=module_name, default_resp=default_resp, **kwargs)

    def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
        self.base_url = base_url or self.base_url
        self.headers = headers or self.headers
        self.config = config or self.config
        self.async_config = async_config or self.async_config
        self._module_name = module_name or self._module_name
        self._default_mode = default_resp or self._default_mode
        self._kwargs = kwargs or self._kwargs

    def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
        self.set_configs(base_url=base_url, headers=headers, config=config, async_config=async_config, module_name=module_name, default_resp=default_resp, **kwargs)
        self._web = None
        self._async = None

    @property
    def client(self):
        if not self._web:
            self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs)
        return self._web

    @property
    def aclient(self):
        if not self._async:
            self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs)
        return self._async

    #############################################################################
    #                             Base REST APIs                                #
    #############################################################################

    def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.delete(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='delete')

    def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.get(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='get')

    def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.head(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='head')

    def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.patch(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='patch')

    def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.put(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='put')

    def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = self.client.post(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='sync', method='post')

    #############################################################################
    #                            Async REST Methods                             #
    #############################################################################

    async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.delete(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='delete')

    async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.get(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='get')

    async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.head(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='head')

    async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.patch(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='patch')

    async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.put(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='put')

    async def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
        resp = await self.aclient.post(url=path, **kwargs)
        if self._default_mode: return resp
        return Response(resp=resp, client_type='async', method='post')

    #############################################################################
    #                                 Callers                                   #
    #############################################################################

    def ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool:
        """Returns a bool of whether the response code is greater than / within range of / less than an int.
        Can be used as a health check."""
        res = self.get(path=path, **kwargs)
        if min_status_code and max_status_code:
            return bool(res.status_code in range(min_status_code, max_status_code))
        if min_status_code: return bool(res.status_code > min_status_code)
        return bool(res.status_code < max_status_code)

    async def async_ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool:
        """Returns a bool of whether the response code is greater than / within range of / less than an int.
        Can be used as a health check."""
        res = await self.async_get(path=path, **kwargs)
        if min_status_code and max_status_code:
            return bool(res.status_code in range(min_status_code, max_status_code))
        if min_status_code: return bool(res.status_code > min_status_code)
        return bool(res.status_code < max_status_code)

    def get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:
        """Expects to get data in JSON. If it does not get the key, returns None."""
        resp = self.get(path=path, **kwargs)
        return resp.data.get(key, None)

    def get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:
        """Expects to get JSON data from a GET request to path and return it as a LazyCls."""
        data = self.get_data(path=path, key=key, **kwargs)
        return convert_to_cls(data)

    async def async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:
        """Expects to get data in JSON. If it does not get the key, returns None."""
        resp = await self.async_get(path=path, **kwargs)
        return resp.data.get(key, None)

    async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:
        """Expects to get JSON data from a GET request to path and return it as a LazyCls."""
        data = await self.async_get_data(path=path, key=key, **kwargs)
        return convert_to_cls(data)
[ "SEQUENCE = [\"|\", \"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def", "\"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0 def", "\"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0", "\"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index =", "self._index = 0 def current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index +", "\"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0 def current(self):", "0 def current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index + 1) %", "current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index + 1) % len(SEQUENCE) return", "\"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0 def current(self): return SEQUENCE[self._index] def", "[\"|\", \"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index", "\"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0 def current(self): return SEQUENCE[self._index]", "\"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self): self._index = 0 def current(self): return", "LoadingIndicator(object): def __init__(self): self._index = 0 def current(self): return SEQUENCE[self._index] def next(self): self._index", "= 0 def current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index + 1)", "<gh_stars>1-10 SEQUENCE = [\"|\", \"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object):", "def current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index + 1) % len(SEQUENCE)", "= [\"|\", \"/\", \"-\", \"\\\\\", \"|\", \"/\", \"-\", \"\\\\\"] class LoadingIndicator(object): def __init__(self):", "return SEQUENCE[self._index] def next(self): self._index = (self._index + 1) % len(SEQUENCE) return self.current()", "def __init__(self): self._index = 0 def current(self): return SEQUENCE[self._index] def next(self): self._index =", "__init__(self): self._index = 0 def current(self): return SEQUENCE[self._index] def next(self): self._index = (self._index", "class LoadingIndicator(object): def __init__(self): self._index = 0 def current(self): return SEQUENCE[self._index] def next(self):" ]
[ "jsonify,session ) from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def", "redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not", "''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id)", "if ext =='.rar' or ext == '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz':", "try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id", "new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename)", "que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return", "request.method == 'POST': file = request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id", "= que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise", "= uuid.uuid4().hex + ext return new_filename else: return None from flask import (", "g, redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import abort bp =", "<filename>CTF/new_file.py # -*- coding: utf-8 -*-= import os from werkzeug.utils import secure_filename from", "return new_filename else: return None from flask import ( Blueprint, flash, g, redirect,", "raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return", "def challenges_list(): if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return", "= que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close()", "finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path", "''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first()", "que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q =", "redirect('../auth/login') if request.method == 'POST': file = request.files['file'] print(request.files) if not file: new_que", "uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or", "Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import abort", "=='.rar' or ext == '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename =", "print(new.new_que_id) new_que = 
que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise", "== new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close()", "import os from werkzeug.utils import secure_filename from CTF import db,new,login from CTF.models import", "secure_filename from CTF import db,new,login from CTF.models import que,user import uuid def random_filename(filename):", "coding: utf-8 -*-= import os from werkzeug.utils import secure_filename from CTF import db,new,login", "not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback()", "filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename:", "print(type(ext)) print(ext) if ext =='.rar' or ext == '.7z'or ext =='.zip'or ext =='.tar'or", "db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复", "file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que)", "print(ext) if ext =='.rar' or ext == '.7z'or ext =='.zip'or ext =='.tar'or ext", "import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not", "ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename else: return None from", "db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not", "= que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close()", "ext =='.rar' or ext == '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename", "db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200", "'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method", "ext == '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex +", "not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q", "=='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename else: return None", "None from flask import ( Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session", "user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST': file = request.files['file']", "new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id ==", "new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() 
except: db.session.rollback() raise finally: db.session.close() return", "random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext ==", "flask import ( Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session ) from", "#文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except:", "elif not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q)", "== new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id", "db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id ==", "!=1: return redirect('../auth/login') if request.method == 'POST': file = request.files['file'] print(request.files) if not", "import secure_filename from CTF import db,new,login from CTF.models import que,user import uuid def", "<script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que", "'POST': file = request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first()", "db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\";", ") from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list():", "file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise", "from werkzeug.utils import secure_filename from CTF import db,new,login from CTF.models import que,user import", "not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try:", "== session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST': file = request.files['file'] print(request.files)", "ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename", "from flask import ( Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session )", "in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST':", "try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script>", "que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return", "ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename else: return", "else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path", "= Blueprint('/admin/new_file', 
__name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in session or", "flash, g, redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import abort bp", "if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错", "ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext == '.7z'or ext", "new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return '''", "== new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list')", "</script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id ==", "except: db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else:", "secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try:", "else: return None from flask import ( Blueprint, flash, g, redirect, render_template, request,", "return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id", "db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path =", "abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in", "-*- coding: utf-8 -*-= import os from werkzeug.utils import secure_filename from CTF import", "werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id'", "return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename)", "new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally:", "new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if", "ext return new_filename else: return None from flask import ( Blueprint, flash, g,", "path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit()", "request, url_for, jsonify,session ) from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file',", "0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1", "return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code': 0}),200 elif", "__name__) 
@bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in session or user.query.filter(user.user_id ==", "if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except:", "from CTF import db,new,login from CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名", "or ext == '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex", "1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\");", "== '.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext", "finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first(): #题目名重复 return jsonify({'code':", "jsonify({'code': 0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -=", "window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id", "except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list') filename=random_filename(file.filename) if not que.query.filter(que.que_id == new.new_que_id).first():", "== 'POST': file = request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id ==", "import db,new,login from CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名 ext =", "import ( Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions", "werkzeug.utils import secure_filename from CTF import db,new,login from CTF.models import que,user import uuid", "url_for, jsonify,session ) from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST'])", "'.7z'or ext =='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return", "if request.method == 'POST': file = request.files['file'] print(request.files) if not file: new_que =", "from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if", "-= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return ''' <script>", "from CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext))", "import que,user import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if", "or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST': file =", "= os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext == '.7z'or ext =='.zip'or", "os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext == '.7z'or ext =='.zip'or ext", "print(request.files) if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit()", "utf-8 -*-= import os from werkzeug.utils import secure_filename from 
CTF import db,new,login from", "# -*- coding: utf-8 -*-= import os from werkzeug.utils import secure_filename from CTF", "q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback()", "#题目名重复 return jsonify({'code': 0}),200 elif not filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first()", "= request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que)", "if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if", "Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in session or user.query.filter(user.user_id", "( Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import", "que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit() except: db.session.rollback() raise finally:", "filename: #文件类型出错 q = que.query.filter(que.que_id == new.new_que_id).first() new.new_que_id -= 1 db.session.delete(q) try: db.session.commit()", "alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename))) path = '/upload/'+str(filename) print(new.new_que_id) new_que =", "CTF import db,new,login from CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名 ext", "=='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename else: return None from flask", "db,new,login from CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1]", "que,user import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext", "render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file', __name__)", "methods=['POST']) def challenges_list(): if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1:", "not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method ==", "new_filename = uuid.uuid4().hex + ext return new_filename else: return None from flask import", "+ ext return new_filename else: return None from flask import ( Blueprint, flash,", "def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext", "=='.zip'or ext =='.tar'or ext =='.tar.gz': new_filename = uuid.uuid4().hex + ext return new_filename else:", "bp = Blueprint('/admin/new_file', __name__) @bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in session", "CTF.models import que,user import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext)", "return redirect('../auth/login') if request.method == 'POST': file = request.files['file'] print(request.files) if not file:", "redirect, render_template, request, url_for, jsonify,session ) from werkzeug.exceptions import abort bp = Blueprint('/admin/new_file',", "uuid.uuid4().hex + ext return new_filename else: return None from flask import ( Blueprint,", "raise finally: 
db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload', secure_filename(filename)))", "db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> ''' else: file.save(os.path.join('CTF/upload',", "challenges_list(): if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login')", "'/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback()", "db.session.commit() except: db.session.rollback() raise finally: db.session.close() return ''' <script> alert(\"请上传压缩包格式文件\"); window.location.href=\"/admin/new\"; </script> '''", "os from werkzeug.utils import secure_filename from CTF import db,new,login from CTF.models import que,user", "request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try:", "new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally:", "new_filename else: return None from flask import ( Blueprint, flash, g, redirect, render_template,", "session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST': file = request.files['file'] print(request.files) if", "return None from flask import ( Blueprint, flash, g, redirect, render_template, request, url_for,", "-*-= import os from werkzeug.utils import secure_filename from CTF import db,new,login from CTF.models", "@bp.route('/admin/new_file', methods=['POST']) def challenges_list(): if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid", "= '/upload/'+str(filename) print(new.new_que_id) new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except:", "#上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar' or ext == '.7z'or", "file = request.files['file'] print(request.files) if not file: new_que = que.query.filter(que.que_id == new.new_que_id).first() new_que.que_address=None", "== new.new_que_id).first() new_que.que_address=path db.session.add(new_que) try: db.session.commit() except: db.session.rollback() raise finally: db.session.close() return redirect('challenges_list')", "import uuid def random_filename(filename): #上传文件重命名 ext = os.path.splitext(filename)[1] print(type(ext)) print(ext) if ext =='.rar'", "session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1: return redirect('../auth/login') if request.method == 'POST': file" ]
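# --- A quick, self-contained check of the random_filename() helper above. ---
# The 'CTF.new_file' import path is an assumption about where this module
# lives; adjust it to the actual package layout before running.
from CTF.new_file import random_filename


def demo_random_filename():
    # Allowed archive extensions come back renamed to '<uuid4 hex><ext>'.
    renamed = random_filename('writeup.zip')
    assert renamed is not None and renamed.endswith('.zip')
    # Anything else (e.g. an executable) is rejected with None,
    # which the upload view treats as "wrong file type".
    assert random_filename('payload.exe') is None
    print('random_filename behaves as expected:', renamed)


if __name__ == '__main__':
    demo_random_filename()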
[ "the dataset. for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true = y_batch_val", "parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of RoIs to process at once.\", default=32) parser.add_option(\"--network\",", "fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically creates", "fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch):", "based on backend and model C.base_net_weights = nn.get_weight_path() train_imgs, classes_count, class_mapping = get_data(options.train_path)", "use. Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal flips in", "tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)", "accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc',", "rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard", "# used to load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2]", "import keras_frcnn.roi_helpers as roi_helpers from tensorflow.python.keras.utils import generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config", "options.output_weight_path model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights must have", "= fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() # write training loss and", "generic_utils.Progbar(n_steps) # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train,", "epoch in range(n_epochs): print(\"\\nStart of epoch %d\" % (epoch + 1,)) progbar =", "ground truth boxes.' 
' Check RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:,", "keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:,", "@tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred", "C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else:", "on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors)", "= num_imgs // BATCH_SIZE n_valid_steps = num_valid_imgs // BATCH_SIZE losses = np.zeros((n_steps, 5))", "model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write training", "samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs,", "weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Defining", "help=\"Number of RoIs to process at once.\", default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base network to", "to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. One of simple or", "step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1,", "overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes == 0: print('RPN", "training loss and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss',", "fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc =", "path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for weights. If not specified,", "keras_frcnn.simple_parser import get_data else: raise ValueError(\"Command line option parser must be one of", "Iterate over the batches of the dataset. for step, (x_batch_val, y_batch_val, img_data) in", "Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0)", "10 == 0: # print(\"Step %d, RPN Cls Loss: %.4f RPN reg Loss:", "built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers,", "y1_tensor, y2_tensor) valid_losses[step, 0] = rpn_class_loss valid_losses[step, 1] = rpn_reg_loss valid_losses[step, 2] =", "__future__ import division import random import pprint import sys import time import numpy", "of epoch %d\" % (epoch + 1,)) progbar = generic_utils.Progbar(n_steps) # Iterate over", "the batches of the dataset. 
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true,", "// BATCH_SIZE n_valid_steps = num_valid_imgs // BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor =", "= generic_utils.Progbar(n_valid_steps) # Iterate over the batches of the dataset. for step, (x_batch_val,", "+ model_path_regex.group(2)) break # # Log every 10 steps. # if step %", "RPN = {mean_overlapping_bboxes} for {step} previous iterations') if mean_overlapping_bboxes == 0: print('RPN is", "import get_data else: raise ValueError(\"Command line option parser must be one of 'pascal_voc'", "division import random import pprint import sys import time import numpy as np", "Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for Fast RCNN model train_classifier_metric =", "print( f'Mean number of bounding boxes from RPN overlapping ' f'ground truth boxes:", "dataset. for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train): # print(step, img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true", "roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built", ":, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples)", "4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation", "tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return", "as K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import Input from tensorflow.keras.models", "train_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function", "directory if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training", "loss_class_cls + loss_class_regr print(\"Total validation loss: %.4f\" % curr_loss) start_time = time.time() break", "logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def rpn_train_step(step, x_batch_train, y_batch_train): with", "path directory exists, it will delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser =", "import get_data elif options.parser == 'simple': from keras_frcnn.simple_parser import get_data else: raise ValueError(\"Command", "print('Not a valid model') raise ValueError # check if weight path was passed", "loaded when testing to ensure correct results') num_imgs = len(train_imgs) num_valid_imgs = len(val_imgs)", "number of overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes ==", "training loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss',", "model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers for all models", "from keras_frcnn import resnet as nn C.network = 'resnet50' else: print('Not a valid", "use_regr=True, overlap_thresh=0.7, 
max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2,", "= tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32) y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss,", "parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use.", "= len(train_imgs) num_valid_imgs = len(val_imgs) print(f'Num train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}')", "boxes ' f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations') if mean_overlapping_bboxes ==", "with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1,", "{mean_overlapping_bboxes} for {step} previous iterations') if mean_overlapping_bboxes == 0: print('RPN is not producing", "0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples", "to \" \"the training (to be used when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output", "time.time() if curr_loss < best_loss: if C.verbose: print( f'Total loss decreased from {best_loss}", "keras.\") (options, args) = parser.parse_args() if not options.train_path: # if filename is not", "writes logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def rpn_train_step(step, x_batch_train, y_batch_train):", "if filename is not given parser.error('Error: path to training data must be specified.", "holds both the RPN and the classifier, # used to load/save weights for", "C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 = bool(options.rot_90) C.model_path = options.output_weight_path model_path_regex", "3] = fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr',", "training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc =", "truth boxes.' ' Check RPN settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0])", "losses[step, 2] = fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step", "when testing to ensure correct results') num_imgs = len(train_imgs) num_valid_imgs = len(val_imgs) print(f'Num", "%.4f \" # \"FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f\" % (", "optimizers for all models optimizer_rpn = Adam(learning_rate=1e-5) optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01)", "bounding boxes that overlap the ground truth boxes.' 
' Check RPN settings or", "def rpn_train_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred,", "to the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred,", "X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None:", "\"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. One", "= {v: k for k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step", "= fast_rcnn_class_loss valid_losses[step, 3] = fast_rcnn_reg_loss valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1,", "as tf from tensorflow.keras import backend as K from tensorflow.keras.optimizers import Adam, SGD", "roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2)", "loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss,", "Fast RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss function of", "/ len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \") print( f'Mean number", ":], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step,", "in enumerate(data_gen_train): # print(step, img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true = y_batch_train step = tf.cast(step, dtype=tf.int64)", "= tf.cast(step, dtype=tf.int64) global_step = tf.add(global_step, one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step(", "as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss", "rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() # write training", "== 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif options.parser == 'simple': from keras_frcnn.simple_parser import", "step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2):", "os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\",", "(including bg) = {len(classes_count)}') config_output_filename = options.config_filename with open(config_output_filename, 'wb') as config_f: pickle.dump(C,", "continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor", "rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, 
rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :],", "= OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path", "RPN settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1])", "RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier:", "step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train):", "len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage number of overlapping bounding boxes ' f'from RPN", "frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() #", "rpn_valid_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred", "= pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples =", "dest=\"config_filename\", help=\"Location to store all the metadata related to \" \"the training (to", "for all models optimizer_rpn = Adam(learning_rate=1e-5) optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) #", "optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for Fast RCNN model", "'{:04d}'.format( epoch) + model_path_regex.group(2)) break # # Log every 10 steps. 
# if", "1] = rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss losses[step, 4]", "fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() # write training loss", "\"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of RoIs to process at once.\", default=32) parser.add_option(\"--network\", dest=\"network\",", "model and Fast RCNN model rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn =", "RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr = np.mean(valid_losses[:, 1])", "--path to command line') if options.parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif", "('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1 and C.verbose: mean_overlapping_bboxes", "testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path", "0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if", "0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3])", "when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input", "backend as K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import Input from", "'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping", "over the batches of the dataset. for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train):", "C.verbose: print(\"Validation Metrics: \") print( f'Mean number of bounding boxes from RPN overlapping", "# Iterate over the batches of the dataset. 
for step, (x_batch_train, y_batch_train, img_data)", "f'Total loss decreased from {best_loss} to {curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1)", "import random import pprint import sys import time import numpy as np from", "inv_map = {v: k for k, v in class_mapping.items()} print('Training images per class:')", "rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write training loss and accuracy to the tensorboard with valid_writer.as_default():", "loss_class_regr = np.mean(valid_losses[:, 3]) class_acc = np.mean(valid_losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid) ) /", "{len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count,", "model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss,", "reg Loss: %.4f \" # \"FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f\"", "None, None) else: input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input =", "print('RPN is not producing bounding boxes that overlap the ground truth boxes.' '", "y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape()", "metrics for Fast RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss", "option parser must be one of 'pascal_voc' or 'simple'\") # pass the settings", "' f'and can be loaded when testing to ensure correct results') num_imgs =", "Detector regression: {loss_class_regr}') print(f'Elapsed time: {time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr", "get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples,", "len(val_imgs) print(f'Num train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count,", "rpn_train_step( global_step, x_batch_train, y_batch_train) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300)", "classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}') print(f'Elapsed time: {time.time() - start_time}') curr_loss =", "a valid model') raise ValueError # check if weight path was passed via", "help=\"Input path for weights. 
If not specified, will try to\" \" load default", "except: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples", "valid_losses[step, 3] = fast_rcnn_reg_loss valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss),", "tensorboard with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return", "step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train,", "np_config.enable_numpy_behavior() # if Logs path directory exists, it will delete the directory if", "to store all the metadata related to \" \"the training (to be used", "as nn C.network = 'resnet50' else: print('Not a valid model') raise ValueError #", "n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor =", "rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) -", "= int(options.num_rois) if options.network == 'vgg': C.network = 'vgg' from keras_frcnn import vgg", "C.model_path = options.output_weight_path model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights", "None) else: input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None,", "Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal flips in training.", "\"--parser\", dest=\"parser\", help=\"Parser to use. One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\",", "(epoch + 1,)) progbar = generic_utils.Progbar(n_steps) # Iterate over the batches of the", "= fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads,", "fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples", "set the path to weights based on backend and model C.base_net_weights = nn.get_weight_path()", "epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store all the metadata related to \"", "default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal flips in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\",", "- 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = []", "valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss),", "global_step = tf.add(global_step, one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step( global_step, x_batch_train, y_batch_train)", "frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() # write training", "= np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr =", "= y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val) R =", "= tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32) y2_tensor =", "def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples =", "C.rot_90 = bool(options.rot_90) C.model_path = options.output_weight_path model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) !=", "\"FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f\" % ( # step, float(rpn_class_loss),", "rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred,", "for k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1, tf.int64)", "pprint import sys import time import numpy as np from optparse import OptionParser", "frcnn_valid_step( global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor) valid_losses[step, 0] = rpn_class_loss valid_losses[step, 1] =", "R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts", "to {curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch)", "delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path", "over the batches of the dataset. 
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val):", "= rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write training", "import os import re import shutil import tensorflow as tf from tensorflow.keras import", "# print(step, img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true = y_batch_train step = tf.cast(step, dtype=tf.int64) global_step =", "= curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break # #", "model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # this is a", "valid model') raise ValueError # check if weight path was passed via command", "model that holds both the RPN and the classifier, # used to load/save", "step == n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor)", "float(fast_rcnn_reg_loss))) # Reset training metrics at the end of each epoch train_classifier_metric.reset_states() progbar", "> 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples", "{best_loss} to {curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format(", "creates directory and writes logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def", "(x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2", "config_f: pickle.dump(C, config_f) print(f'Config has been written to {config_output_filename}, ' f'and can be", "Log every 10 steps. 
# if step % 10 == 0: # print(\"Step", "RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes", "fast_rcnn_class_loss valid_losses[step, 3] = fast_rcnn_reg_loss valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls',", "command line if options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set the path to", "persist them in the config object C = config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips", "rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape:", "not options.train_path: # if filename is not given parser.error('Error: path to training data", "k, v in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count) print(f'Num classes (including bg)", "pprint.pprint(classes_count) print(f'Num classes (including bg) = {len(classes_count)}') config_output_filename = options.config_filename with open(config_output_filename, 'wb')", "base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier =", "model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights must have .hdf5", "\"the training (to be used when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for", "settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls", "loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr = np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr", "or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples", "np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in", "model') raise ValueError # check if weight path was passed via command line", "Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the", "action=\"store_true\", default=False) parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\", help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to", "truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss RPN", "if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\")", "with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss", "n_epochs = options.num_epochs BATCH_SIZE = 1 n_steps = num_imgs // BATCH_SIZE n_valid_steps =", "is a model that holds both the RPN and the classifier, # used", "rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :],", "len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation: Average number of 
overlapping bounding boxes ' f'from", "= np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples),", "('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes", "n_valid_steps = num_valid_imgs // BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch", "pass the settings from the command line, and persist them in the config", "been written to {config_output_filename}, ' f'and can be loaded when testing to ensure", "class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch)", "= [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = []", "@tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred", "if options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set the path to weights based", "fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function", "valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def rpn_train_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true,", "default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base network to use. Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\",", "img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true = y_batch_train step = tf.cast(step, dtype=tf.int64) global_step = tf.add(global_step, one_step)", "= [] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time = time.time() class_mapping_inv =", "img_data) in enumerate(data_gen_train): # print(step, img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true = y_batch_train step = tf.cast(step,", "keras_frcnn.pascal_voc_parser import get_data elif options.parser == 'simple': from keras_frcnn.simple_parser import get_data else: raise", "= fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc", "fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true =", "import shutil import tensorflow as tf from tensorflow.keras import backend as K from", "y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)", "parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for weights.", "np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr = np.mean(valid_losses[:, 3]) 
class_acc = np.mean(valid_losses[:,", "np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time =", "= losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically creates directory and writes logs", "losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically creates directory and writes logs train_writer", "{v: k for k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step =", "tf.float32) y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step( global_step,", "best_loss: if C.verbose: print( f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')", "steps. # if step % 10 == 0: # print(\"Step %d, RPN Cls", "('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps - 1 and", "try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice(neg_samples, C.num_rois", "{mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}')", "% curr_loss) start_time = time.time() if curr_loss < best_loss: if C.verbose: print( f'Total", ":], tf.float32) y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step(", "# print(\"Step %d, RPN Cls Loss: %.4f RPN reg Loss: %.4f \" #", "options.parser == 'simple': from keras_frcnn.simple_parser import get_data else: raise ValueError(\"Command line option parser", "= frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss losses[step, 1]", "import Model from keras_frcnn import config, data_generators from keras_frcnn import losses as losses", "= Adam(learning_rate=1e-5) optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for Fast", "from tensorflow.keras.models import Model from keras_frcnn import config, data_generators from keras_frcnn import losses", "= np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples),", "K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format", "batches of the dataset. 
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true", "global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) #", "= Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers for all models optimizer_rpn", "case where num_rois = 1, we pick a random pos or neg sample", "optimizer_rpn = Adam(learning_rate=1e-5) optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for", "default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store all the metadata related to \" \"the", "# this is a model that holds both the RPN and the classifier,", "command line, and persist them in the config object C = config.Config() C.use_horizontal_flips", "rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2,", "5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time = time.time()", "f'\\nMean number of bounding boxes from RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}')", "1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation:", "X2], training=True) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss,", "= (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers =", "rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() # write training loss and accuracy to the tensorboard", "else: # set the path to weights based on backend and model C.base_net_weights", "curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break # # Log", "x2_tensor, y1_tensor, y2_tensor) valid_losses[step, 0] = rpn_class_loss valid_losses[step, 1] = rpn_reg_loss valid_losses[step, 2]", "fast_rcnn_reg_loss valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls',", "print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val", "of overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations')", "from {best_loss} to {curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" +", "help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store all the metadata related", "models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers for all", "y_batch_train, img_data) in enumerate(data_gen_train): # print(step, img_data['filepath']) y_rpn_cls_true, y_rpn_regr_true = y_batch_train step =", "trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales)", "action=\"store_true\", default=False) parser.add_option(\"--vf\", 
dest=\"vertical_flips\", help=\"Augment with vertical flips in training. (Default=false).\", action=\"store_true\", default=False)", "= generic_utils.Progbar(n_steps) # Iterate over the batches of the dataset. for step, (x_batch_train,", "saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2))", "iterations') if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes that overlap", "== n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid", "else: sel_samples = random.choice(pos_samples) return sel_samples n_epochs = options.num_epochs BATCH_SIZE = 1 n_steps", "boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss RPN classifier:", "None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples,", "it will delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\",", "Accuracy metrics for Fast RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() #", "provided by keras.\") (options, args) = parser.parse_args() if not options.train_path: # if filename", "Logs path directory exists, it will delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser", "continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor", "re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights must have .hdf5 filetype') exit(1)", "training loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss',", "= np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr = np.mean(valid_losses[:, 3]) class_acc =", "sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor =", "nn elif options.network == 'resnet50': from keras_frcnn import resnet as nn C.network =", "Reset training metrics at the end of each epoch train_classifier_metric.reset_states() progbar = generic_utils.Progbar(n_valid_steps)", "= model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads =", "with horizontal flips in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\", help=\"Augment with vertical", "tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc", "parser must be one of 'pascal_voc' or 'simple'\") # pass the settings from", "{len(classes_count)}') config_output_filename = options.config_filename with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print(f'Config has", "dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\",", "valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function", "every 10 steps. # if step % 10 == 0: # print(\"Step %d,", "SGD from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from keras_frcnn import config,", "= parser.parse_args() if not options.train_path: # if filename is not given parser.error('Error: path", "training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\", help=\"Augment with 90 degree rotations in", "on backend and model C.base_net_weights = nn.get_weight_path() train_imgs, classes_count, class_mapping = get_data(options.train_path) val_imgs,", "pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples)", "pickle import os import re import shutil import tensorflow as tf from tensorflow.keras", "rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1,", "= num_valid_imgs // BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch =", "loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total validation loss: %.4f\" % curr_loss)", "import time import numpy as np from optparse import OptionParser import pickle import", "y_rpn_regr_pred = model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads", "parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal flips in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\",", "generic_utils.Progbar(n_valid_steps) # Iterate over the batches of the dataset. 
for step, (x_batch_val, y_batch_val,", "C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except:", "get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0,", "= {len(classes_count)}') config_output_filename = options.config_filename with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print(f'Config", "{class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}')", "y_batch_train step = tf.cast(step, dtype=tf.int64) global_step = tf.add(global_step, one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss", "default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for weights. If not specified, will try to\"", "y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as", "time.time() class_mapping_inv = {v: k for k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0,", "in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\", help=\"Augment with 90 degree rotations", "weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break", "'channels_first': input_shape_img = (3, None, None) else: input_shape_img = (None, None, 3) img_input", "= [] rpn_accuracy_for_epoch = [] valid_losses = np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid", "= nn.get_weight_path() train_imgs, classes_count, class_mapping = get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path) if", "the metadata related to \" \"the training (to be used when testing).\", default=\"config.pickle\")", "sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config np_config.enable_numpy_behavior() # if Logs path directory exists, it", "!= '.hdf5': print('Output weights must have .hdf5 filetype') exit(1) C.num_rois = int(options.num_rois) if", "rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1", "is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:,", "to {config_output_filename}, ' f'and can be loaded when testing to ensure correct results')", "ensure correct results') num_imgs = len(train_imgs) num_valid_imgs = len(val_imgs) print(f'Num train samples {len(train_imgs)}')", "np from optparse import OptionParser import pickle import os import re import shutil", "was passed via command line if options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set", "rpn_class_loss losses[step, 1] = rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss", "[] if C.verbose: print(\"Validation Metrics: \") print( f'Mean number of bounding boxes from", "boxes.' 
' Check RPN settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr", "3] = fast_rcnn_reg_loss valid_losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr',", "dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. One of", "backend and model C.base_net_weights = nn.get_weight_path() train_imgs, classes_count, class_mapping = get_data(options.train_path) val_imgs, _,", "fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train,", "rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1,", "images per class:') pprint.pprint(classes_count) print(f'Num classes (including bg) = {len(classes_count)}') config_output_filename = options.config_filename", "Loss: %.4f \" # \"FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f\" %", "Model from keras_frcnn import config, data_generators from keras_frcnn import losses as losses import", "class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid)", "Loss: %.4f\" % ( # step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss), # float(fast_rcnn_reg_loss))) # Reset", "fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1] == 1)", "or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr = np.mean(valid_losses[:, 1]) loss_class_cls =", "FRCNN reg Loss: %.4f\" % ( # step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss), # float(fast_rcnn_reg_loss)))", "# Iterate over the batches of the dataset. for step, (x_batch_val, y_batch_val, img_data)", "accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc',", "// 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples", "classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector regression:", "= neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0]", "len(train_imgs) num_valid_imgs = len(val_imgs) print(f'Num train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train", "config object C = config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 =", "path to training data must be specified. 
Pass --path to command line') if", "print(f'Elapsed time: {time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls +", "if K.image_data_format() == 'channels_first': input_shape_img = (3, None, None) else: input_shape_img = (None,", "%d epochs\" % n_epochs) for epoch in range(n_epochs): print(\"\\nStart of epoch %d\" %", "K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import Input from tensorflow.keras.models import", "the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers for", "passed via command line if options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set the", "# set the path to weights based on backend and model C.base_net_weights =", "10 steps. # if step % 10 == 0: # print(\"Step %d, RPN", "training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr = np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2])", "X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor =", "tf.int64) print(\"Training started for %d epochs\" % n_epochs) for epoch in range(n_epochs): print(\"\\nStart", "accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN", "and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)", "rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps - 1", "Check RPN settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:,", "dest=\"vertical_flips\", help=\"Augment with vertical flips in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\",", "dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for weights. If", "= 'vgg' from keras_frcnn import vgg as nn elif options.network == 'resnet50': from", "break # # Log every 10 steps. # if step % 10 ==", "print(f'\\nValidation: Average number of overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes}') if", "with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss,", "OptionParser import pickle import os import re import shutil import tensorflow as tf", "import config, data_generators from keras_frcnn import losses as losses import keras_frcnn.roi_helpers as roi_helpers", "model_rpn.trainable_weights)) # write training loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('rpn_class_loss',", "with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False)", "overlap the ground truth boxes.' 
' Check RPN settings or keep training.') loss_rpn_cls", "of bounding boxes from RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy", "= np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: #", "fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically creates directory and writes", "that overlap the ground truth boxes.' ' Check RPN settings or keep training.')", "C.num_rois = int(options.num_rois) if options.network == 'vgg': C.network = 'vgg' from keras_frcnn import", "object C = config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 = bool(options.rot_90)", "in the config object C = config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips)", "(x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true = y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss", "'wb') as config_f: pickle.dump(C, config_f) print(f'Config has been written to {config_output_filename}, ' f'and", "the dataset. for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train): # print(step, img_data['filepath']) y_rpn_cls_true,", "+ selected_neg_samples else: # in the extreme case where num_rois = 1, we", "rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn =", "Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True) fast_rcnn_class_loss =", "= fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1,", "'resnet50' else: print('Not a valid model') raise ValueError # check if weight path", "C.class_mapping = class_mapping inv_map = {v: k for k, v in class_mapping.items()} print('Training", "print(\"Training started for %d epochs\" % n_epochs) for epoch in range(n_epochs): print(\"\\nStart of", "help=\"Parser to use. 
One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\",", "max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2,", "'pascal_voc' or 'simple'\") # pass the settings from the command line, and persist", "in range(n_epochs): print(\"\\nStart of epoch %d\" % (epoch + 1,)) progbar = generic_utils.Progbar(n_steps)", "tensorboard with train_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss,", "class_mapping = get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path) if 'bg' not in classes_count:", "loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" % curr_loss) start_time = time.time() if curr_loss", "if options.parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif options.parser == 'simple': from", "from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C,", "BATCH_SIZE n_valid_steps = num_valid_imgs // BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = []", "= rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss losses[step, 4] =", "classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier =", "random.choice(pos_samples) return sel_samples n_epochs = options.num_epochs BATCH_SIZE = 1 n_steps = num_imgs //", "from RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector", "# write training loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss,", "Pass --path to command line') if options.parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data", "fast_rcnn_class_acc = train_classifier_metric.result() # write training loss and accuracy to the tensorboard with", "directory and writes logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def rpn_train_step(step,", "RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn =", "len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) >", "- start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss:", "- 1) # tensorboard writer, automatically creates directory and writes logs train_writer =", "'.hdf5': print('Output weights must have .hdf5 filetype') exit(1) C.num_rois = int(options.num_rois) if options.network", "neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else:", "IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue", "0] = rpn_class_loss losses[step, 1] = rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss losses[step, 3]", "help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. 
One of simple", "// BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] valid_losses", "shutil import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras.optimizers", "them in the config object C = config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips =", ":], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step( global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor) valid_losses[step,", "if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor", "= np.Inf start_time = time.time() class_mapping_inv = {v: k for k, v in", "step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :,", "directory exists, it will delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser()", "shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers", "dest=\"parser\", help=\"Parser to use. One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\",", "training loss and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss',", "y_rpn_cls_true, y_rpn_regr_true = y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val)", "will delete the directory if os.path.exists('logs'): shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\",", "and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)", "epoch %d\" % (epoch + 1,)) progbar = generic_utils.Progbar(n_steps) # Iterate over the", "tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d epochs\" % n_epochs) for epoch in range(n_epochs):", "global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss losses[step, 1] = rpn_reg_loss", "pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples,", "get_data(options.valid_path) if 'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping", "selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else:", "help=\"Augment with vertical flips in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\", help=\"Augment", "rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result()", "vgg as nn elif options.network == 'resnet50': from keras_frcnn import resnet as nn", "help=\"Augment with 90 degree rotations in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\",", "path was passed via command line if options.input_weight_path: C.base_net_weights = options.input_weight_path else: #", "used when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\",", "rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)", "exit(1) C.num_rois = int(options.num_rois) if options.network == 'vgg': C.network = 'vgg' from keras_frcnn", "rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # this is a model that holds", "neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] ==", "tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples =", "bounding boxes from RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}')", "from the command line, and persist them in the config object C =", "keras_frcnn import config, data_generators from keras_frcnn import losses as losses import keras_frcnn.roi_helpers as", "parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\", help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store all", "= [] if C.verbose: print(\"Validation Metrics: \") print( f'Mean number of bounding boxes", "dest=\"rot_90\", help=\"Augment with 90 degree rotations in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--num_epochs\", type=\"int\",", "rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true,", "class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = []", "1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples", "import backend as K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import Input", "step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples", "note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS =", "be one of 'pascal_voc' or 'simple'\") # pass the settings from the command", "to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if", "k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training", "= [] if C.verbose: print( f'\\nMean number of bounding boxes from RPN overlapping", "in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count) print(f'Num classes (including bg) = {len(classes_count)}')", "Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers for all models optimizer_rpn =", "converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data,", "resnet as nn C.network = 'resnet50' else: print('Not a valid model') raise ValueError", "import Adam, SGD from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from keras_frcnn", "' f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations') if mean_overlapping_bboxes == 0:", "y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C,", "reg Loss: %.4f\" % ( # step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss), # float(fast_rcnn_reg_loss))) #", "= pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples", "batches of the dataset. 
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import os
import re
import shutil
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from tensorflow.python.keras.utils import generic_utils

sys.setrecursionlimit(40000)

from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()

# if the logs directory already exists, delete it (so TensorBoard logs start fresh)
if os.path.exists('logs'):
    shutil.rmtree('logs')

parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-v", "--valid_path", dest="valid_path", help="Path to validation data.")
parser.add_option("-o", "--parser", dest="parser",
                  help="Parser to use. One of simple or pascal_voc", default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                  help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network",
                  help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips",
                  help="Augment with horizontal flips in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips",
                  help="Augment with vertical flips in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90",
                  help="Augment with 90 degree rotations in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--num_epochs", type="int", dest="num_epochs",
                  help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename",
                  help="Location to store all the metadata related to "
                       "the training (to be used when testing).", default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path",
                  help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path",
                  help="Input path for weights. If not specified, will try to"
                       " load default weights provided by keras.")

(options, args) = parser.parse_args()

if not options.train_path:  # if filename is not given
    parser.error('Error: path to training data must be specified. Pass --path to command line')

if options.parser == 'pascal_voc':
    from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
    from keras_frcnn.simple_parser import get_data
else:
    raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")

# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
model_path_regex = re.match(r"^(.+)(\.hdf5)$", C.model_path)
if model_path_regex.group(2) != '.hdf5':
    print('Output weights must have .hdf5 filetype')
    exit(1)
C.num_rois = int(options.num_rois)

if options.network == 'vgg':
    C.network = 'vgg'
    from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
    from keras_frcnn import resnet as nn
    C.network = 'resnet50'
else:
    print('Not a valid model')
    raise ValueError

# check if weight path was passed via command line
if options.input_weight_path:
    C.base_net_weights = options.input_weight_path
else:
    # set the path to weights based on backend and model
    C.base_net_weights = nn.get_weight_path()

train_imgs, classes_count, class_mapping = get_data(options.train_path)
val_imgs, _, _ = get_data(options.valid_path)

if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_mapping['bg'] = len(class_mapping)

C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}

print('Training images per class:')
pprint.pprint(classes_count)
print(f'Num classes (including bg) = {len(classes_count)}')

config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
    pickle.dump(C, config_f)
    print(f'Config has been written to {config_output_filename}, '
          f'and can be loaded when testing to ensure correct results')
num_imgs = len(train_imgs)
num_valid_imgs = len(val_imgs)
print(f'Num train samples {len(train_imgs)}')
print(f'Num val samples {len(val_imgs)}')

data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C,
                                               nn.get_img_output_length,
                                               K.image_data_format(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C,
                                             nn.get_img_output_length,
                                             K.image_data_format(), mode='val')

if K.image_data_format() == 'channels_first':
    input_shape_img = (3, None, None)
else:
    input_shape_img = (None, None, 3)

img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))

shared_layers = nn.nn_base(img_input, trainable=True)

# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois,
                           nb_classes=len(classes_count), trainable=True)

model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)

# this is a model that holds both the RPN and the classifier,
# used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)

# Defining optimizers for all models
optimizer_rpn = Adam(learning_rate=1e-5)
optimizer_classifier = Adam(learning_rate=1e-5)
optimizer_all = SGD(learning_rate=0.01)

# Accuracy metrics for the Fast R-CNN (detector) model
train_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
val_classifier_metric = tf.keras.metrics.CategoricalAccuracy()

# Loss functions of the RPN model and the Fast R-CNN model
rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors)
rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors)
fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss()
fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1)

# tensorboard writers; automatically create the log directories and write logs
train_writer = tf.summary.create_file_writer('logs/train/')
valid_writer = tf.summary.create_file_writer('logs/valid/')
@tf.function
def rpn_train_step(step, x_batch_train, y_batch_train):
    with tf.GradientTape() as rpn_tape:
        y_rpn_cls_true, y_rpn_regr_true = y_batch_train
        y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True)
        rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
        rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
    rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights)
    optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights))
    # write training loss to the tensorboard
    with train_writer.as_default():
        tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
        tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
    return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss


@tf.function
def frcnn_train_step(step, x_batch_train, X2, Y1, Y2):
    with tf.GradientTape() as frcnn_tape:
        rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True)
        fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
        fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
    frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss],
                                      model_classifier.trainable_weights)
    optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights))
    train_classifier_metric.update_state(Y1, rcnn_class_pred)
    fast_rcnn_class_acc = train_classifier_metric.result()
    # write training loss and accuracy to the tensorboard
    with train_writer.as_default():
        tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
        tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
        tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
    return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc


@tf.function
def rpn_valid_step(step, x_batch_train, y_batch_train):
    with tf.GradientTape() as rpn_tape:
        y_rpn_cls_true, y_rpn_regr_true = y_batch_train
        y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False)
        rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
        rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
    # write validation loss to the tensorboard
    with valid_writer.as_default():
        tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
        tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
    return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss


@tf.function
def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2):
    with tf.GradientTape() as frcnn_tape:
        rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False)
        fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
        fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
    val_classifier_metric.update_state(Y1, rcnn_class_pred)
    fast_rcnn_class_acc = val_classifier_metric.result()
    # write validation loss and accuracy to the tensorboard
    with valid_writer.as_default():
        tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
        tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
        tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
    return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc


def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch):
    neg_samples = np.where(Y1[0, :, -1] == 1)
    pos_samples = np.where(Y1[0, :, -1] == 0)
    if len(neg_samples) > 0:
        neg_samples = neg_samples[0]
    else:
        neg_samples = []
    if len(pos_samples) > 0:
        pos_samples = pos_samples[0]
    else:
        pos_samples = []
    rpn_accuracy_rpn_monitor.append(len(pos_samples))
    rpn_accuracy_for_epoch.append(len(pos_samples))
    if C.num_rois > 1:
        if len(pos_samples) < C.num_rois // 2:
            selected_pos_samples = pos_samples.tolist()
        else:
            selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2,
                                                    replace=False).tolist()
        try:
            selected_neg_samples = np.random.choice(neg_samples,
                                                    C.num_rois - len(selected_pos_samples),
                                                    replace=False).tolist()
        except:
            selected_neg_samples = np.random.choice(neg_samples,
                                                    C.num_rois - len(selected_pos_samples),
                                                    replace=True).tolist()
        sel_samples = selected_pos_samples + selected_neg_samples
    else:
        # in the extreme case where num_rois = 1, we pick a random pos or neg sample
        selected_pos_samples = pos_samples.tolist()
        selected_neg_samples = neg_samples.tolist()
        if np.random.randint(0, 2):
            sel_samples = random.choice(neg_samples)
        else:
            sel_samples = random.choice(pos_samples)
    return sel_samples


n_epochs = options.num_epochs
BATCH_SIZE = 1
n_steps = num_imgs // BATCH_SIZE
n_valid_steps = num_valid_imgs // BATCH_SIZE

losses = np.zeros((n_steps, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
valid_losses = np.zeros((n_valid_steps, 5))
rpn_accuracy_rpn_monitor_valid = []
rpn_accuracy_for_epoch_valid = []

best_loss = np.Inf
start_time = time.time()
class_mapping_inv = {v: k for k, v in class_mapping.items()}
global_step = tf.convert_to_tensor(0, tf.int64)
one_step = tf.convert_to_tensor(1, tf.int64)

print("Training started for %d epochs" % n_epochs)
for epoch in range(n_epochs):
    print("\nStart of epoch %d" % (epoch + 1,))
    progbar = generic_utils.Progbar(n_steps)

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train):
        # print(step, img_data['filepath'])
        y_rpn_cls_true, y_rpn_regr_true = y_batch_train
        step = tf.cast(step, dtype=tf.int64)
        global_step = tf.add(global_step, one_step)
        y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step(
            global_step, x_batch_train, y_batch_train)
        R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
                                   use_regr=True, overlap_thresh=0.7, max_boxes=300)
        # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
        X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
        if X2 is None:
            rpn_accuracy_rpn_monitor.append(0)
            rpn_accuracy_for_epoch.append(0)
            continue
        sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch)
        x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
        y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
        y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
        fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step(
            global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor)

        losses[step, 0] = rpn_class_loss
        losses[step, 1] = rpn_reg_loss
        losses[step, 2] = fast_rcnn_class_loss
        losses[step, 3] = fast_rcnn_reg_loss
        losses[step, 4] = fast_rcnn_class_acc
        progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss),
                                  ('detector_cls', fast_rcnn_class_loss),
                                  ('detector_regr', fast_rcnn_reg_loss)])

        if step == n_steps - 1 and C.verbose:
            mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
            rpn_accuracy_rpn_monitor = []
            print(f'\nAverage number of overlapping bounding boxes '
                  f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations')
            if mean_overlapping_bboxes == 0:
                print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
                      ' Check RPN settings or keep training.')
            loss_rpn_cls = np.mean(losses[:, 0])
            loss_rpn_regr = np.mean(losses[:, 1])
            loss_class_cls = np.mean(losses[:, 2])
            loss_class_regr = np.mean(losses[:, 3])
            class_acc = np.mean(losses[:, 4])
            mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
            rpn_accuracy_for_epoch = []
            if C.verbose:
                print(f'\nMean number of bounding boxes from RPN overlapping '
                      f'ground truth boxes: {mean_overlapping_bboxes}')
                print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
                print(f'Loss RPN classifier: {loss_rpn_cls}')
                print(f'Loss RPN regression: {loss_rpn_regr}')
                print(f'Loss Detector classifier: {loss_class_cls}')
                print(f'Loss Detector regression: {loss_class_regr}')
                print(f'Elapsed time: {time.time() - start_time}')
            curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
            print("Total Loss: %.4f" % curr_loss)
            start_time = time.time()
            if curr_loss < best_loss:
                if C.verbose:
                    print(f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')
                best_loss = curr_loss
            model_all.save_weights(model_path_regex.group(1) + "_" + '{:04d}'.format(epoch)
                                   + model_path_regex.group(2))
            break

        # # Log every 10 steps.
        # if step % 10 == 0:
        #     print("Step %d, RPN Cls Loss: %.4f RPN reg Loss: %.4f "
        #           "FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f" % (
        #               step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss),
        #               float(fast_rcnn_reg_loss)))

    # Reset training metrics at the end of each epoch
    train_classifier_metric.reset_states()

    progbar = generic_utils.Progbar(n_valid_steps)
    # Iterate over the batches of the dataset.
    for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val):
        y_rpn_cls_true, y_rpn_regr_true = y_batch_val
        y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step(
            global_step, x_batch_val, y_batch_val)
        R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
                                   use_regr=True, overlap_thresh=0.7, max_boxes=300)
        X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
        if X2 is None:
            rpn_accuracy_rpn_monitor_valid.append(0)
            rpn_accuracy_for_epoch_valid.append(0)
            continue
        sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid,
                                           rpn_accuracy_for_epoch_valid)
        x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
        y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
        y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
        fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step(
            global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor)

        valid_losses[step, 0] = rpn_class_loss
        valid_losses[step, 1] = rpn_reg_loss
        valid_losses[step, 2] = fast_rcnn_class_loss
        valid_losses[step, 3] = fast_rcnn_reg_loss
        valid_losses[step, 4] = fast_rcnn_class_acc
        progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss),
                                  ('detector_cls', fast_rcnn_class_loss),
                                  ('detector_regr', fast_rcnn_reg_loss)])

        if step == n_valid_steps - 1 and C.verbose:
            mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid)) / len(rpn_accuracy_for_epoch_valid)
            rpn_accuracy_rpn_monitor_valid = []
            print(f'\nValidation: Average number of overlapping bounding boxes '
                  f'from RPN = {mean_overlapping_bboxes}')
            if mean_overlapping_bboxes == 0:
                print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
                      ' Check RPN settings or keep training.')
            loss_rpn_cls = np.mean(valid_losses[:, 0])
            loss_rpn_regr = np.mean(valid_losses[:, 1])
            loss_class_cls = np.mean(valid_losses[:, 2])
            loss_class_regr = np.mean(valid_losses[:, 3])
            class_acc = np.mean(valid_losses[:, 4])
            mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid)) / len(rpn_accuracy_for_epoch_valid)
            rpn_accuracy_for_epoch_valid = []
            if C.verbose:
                print("Validation Metrics: ")
                print(f'Mean number of bounding boxes from RPN overlapping '
                      f'ground truth boxes: {mean_overlapping_bboxes}')
                print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
                print(f'Loss RPN classifier: {loss_rpn_cls}')
                print(f'Loss RPN regression: {loss_rpn_regr}')
                print(f'Loss Detector classifier: {loss_class_cls}')
                print(f'Loss Detector regression: {loss_class_regr}')
                print(f'Elapsed time: {time.time() - start_time}')
            curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
            print("Total validation loss: %.4f" % curr_loss)
            start_time = time.time()
            # end the pass over the (infinite) validation generator for this epoch
            break
If not specified, will try to\" \"", "specified, will try to\" \" load default weights provided by keras.\") (options, args)", "loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr = np.mean(valid_losses[:, 3]) class_acc = np.mean(valid_losses[:, 4]) mean_overlapping_bboxes", "rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1,", "step == n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid)", "parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\",", "1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4])", "X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True)", "return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with", "if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes that overlap the", "rpn_accuracy_for_epoch = [] if C.verbose: print( f'\\nMean number of bounding boxes from RPN", "weights must have .hdf5 filetype') exit(1) C.num_rois = int(options.num_rois) if options.network == 'vgg':", "function of RPN model and Fast RCNN model rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn =", "Loss: %.4f RPN reg Loss: %.4f \" # \"FRCNN Cls Loss: %.4f FRCNN", "regression: {loss_class_regr}') print(f'Elapsed time: {time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr +", "with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True)", "classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping inv_map = {v: k", "losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) #", "= tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d epochs\" % n_epochs) for epoch in", "we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples =", "= 1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist()", "type=\"int\", dest=\"num_rois\", help=\"Number of RoIs to process at once.\", default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base", "[] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time = time.time() class_mapping_inv = {v:", "rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write training loss and accuracy to", "RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss function of RPN", "= len(class_mapping) C.class_mapping = class_mapping inv_map = {v: k for k, v in", "= get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:,", "selected_neg_samples = np.random.choice(neg_samples, C.num_rois - 
len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else:", "k for k, v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1,", "y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train,", "rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step,", "of the dataset. for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true =", "= selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois =", "= nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input,", "y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) #", "specified. Pass --path to command line') if options.parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import", "0: print('RPN is not producing bounding boxes that overlap the ground truth boxes.'", "C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h)", "print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}') print(f'Elapsed", "fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true", "use. One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of", "[] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples))", "= y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss =", "default weights provided by keras.\") (options, args) = parser.parse_args() if not options.train_path: #", "C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2,", "tf.cast(step, dtype=tf.int64) global_step = tf.add(global_step, one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step( global_step,", "Average number of overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes", "boxes.' 
' Check RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr", "= rpn_reg_loss valid_losses[step, 2] = fast_rcnn_class_loss valid_losses[step, 3] = fast_rcnn_reg_loss valid_losses[step, 4] =", "num_valid_imgs = len(val_imgs) print(f'Num train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train =", "options.config_filename with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print(f'Config has been written to", "== n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor", "= losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically creates directory", "loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" % curr_loss) start_time", "losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1) # tensorboard writer, automatically", "n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid =", "= {mean_overlapping_bboxes} for {step} previous iterations') if mean_overlapping_bboxes == 0: print('RPN is not", "= np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc =", "import losses as losses import keras_frcnn.roi_helpers as roi_helpers from tensorflow.python.keras.utils import generic_utils sys.setrecursionlimit(40000)", "tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function def rpn_train_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape:", "rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights))", "dest=\"horizontal_flips\", help=\"Augment with horizontal flips in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\", help=\"Augment", "1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage", "once.\", default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base network to use. Supports vgg or resnet50.\", default='resnet50')", "truth boxes.' ' Check RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0])", "[] valid_losses = np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = [] best_loss =", "loss_class_regr print(\"Total Loss: %.4f\" % curr_loss) start_time = time.time() if curr_loss < best_loss:", "1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples =", "of the dataset. 
for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train): # print(step, img_data['filepath'])", "Detector classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}') print(f'Elapsed time: {time.time() - start_time}') curr_loss", "layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers,", "class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d", "C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor,", "C.verbose: print( f'\\nMean number of bounding boxes from RPN overlapping ' f'ground truth", "RPN reg Loss: %.4f \" # \"FRCNN Cls Loss: %.4f FRCNN reg Loss:", "# if Logs path directory exists, it will delete the directory if os.path.exists('logs'):", "progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step", "loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" % curr_loss) start_time = time.time()", "options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set the path to weights based on", "(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\", help=\"Augment with vertical flips in training. (Default=false).\", action=\"store_true\",", "1,)) progbar = generic_utils.Progbar(n_steps) # Iterate over the batches of the dataset. for", "nn.get_img_output_length, K.image_data_format(), mode='val') if K.image_data_format() == 'channels_first': input_shape_img = (3, None, None) else:", "y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss losses[step, 1] = rpn_reg_loss losses[step, 2] =", "'resnet50': from keras_frcnn import resnet as nn C.network = 'resnet50' else: print('Not a", "from keras_frcnn.pascal_voc_parser import get_data elif options.parser == 'simple': from keras_frcnn.simple_parser import get_data else:", "y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true = y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss =", "frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train,", "raise ValueError # check if weight path was passed via command line if", "print('Training images per class:') pprint.pprint(classes_count) print(f'Num classes (including bg) = {len(classes_count)}') config_output_filename =", "import generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config np_config.enable_numpy_behavior() # if Logs path directory", "y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)", "# if step % 10 == 0: # print(\"Step %d, RPN Cls Loss:", "# Reset training metrics at the end of each epoch train_classifier_metric.reset_states() progbar =", "/ len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation: Average number of overlapping bounding boxes '", "C.network = 'resnet50' else: print('Not a valid model') raise ValueError # check if", "[] print(f'\\nAverage number of 
overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes} for", "y_rpn_regr_pred) # write training loss and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss',", "{loss_class_regr}') print(f'Elapsed time: {time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls", "np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose:", "rpn_class_loss, rpn_reg_loss = rpn_train_step( global_step, x_batch_train, y_batch_train) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),", "v in class_mapping.items()} global_step = tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training started", "curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" %", "BATCH_SIZE losses = np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] valid_losses =", "sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor)", "testing to ensure correct results') num_imgs = len(train_imgs) num_valid_imgs = len(val_imgs) print(f'Num train", "{step} previous iterations') if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes", "training metrics at the end of each epoch train_classifier_metric.reset_states() progbar = generic_utils.Progbar(n_valid_steps) #", "options.network == 'vgg': C.network = 'vgg' from keras_frcnn import vgg as nn elif", "parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\", help=\"Augment with 90 degree rotations in training. 
(Default=false).\", action=\"store_true\", default=False)", "mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage number of overlapping", "pickle.dump(C, config_f) print(f'Config has been written to {config_output_filename}, ' f'and can be loaded", "simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of RoIs to process", "replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice(neg_samples,", "\"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break # # Log every 10 steps.", "y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape()", "the tensorboard with train_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred,", "' Check RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr =", "tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor,", "loss_rpn_regr = np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr = np.mean(valid_losses[:, 3]) class_acc", "tensorflow.keras import backend as K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import", "get_data else: raise ValueError(\"Command line option parser must be one of 'pascal_voc' or", "load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) #", "One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of RoIs", "len(class_mapping) C.class_mapping = class_mapping inv_map = {v: k for k, v in class_mapping.items()}", "with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print(f'Config has been written to {config_output_filename},", "all the metadata related to \" \"the training (to be used when testing).\",", "progbar = generic_utils.Progbar(n_steps) # Iterate over the batches of the dataset. 
for step,", "via command line if options.input_weight_path: C.base_net_weights = options.input_weight_path else: # set the path", "RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector", "loss decreased from {best_loss} to {curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) +", "rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \") print( f'Mean number of bounding", "= config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 = bool(options.rot_90) C.model_path =", "= np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch)", "if step == n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) /", "import OptionParser import pickle import os import re import shutil import tensorflow as", "y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step( global_step, x_batch_val,", "model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() # write training loss and accuracy to", "0]) loss_rpn_regr = np.mean(valid_losses[:, 1]) loss_class_cls = np.mean(valid_losses[:, 2]) loss_class_regr = np.mean(valid_losses[:, 3])", "frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train,", "if curr_loss < best_loss: if C.verbose: print( f'Total loss decreased from {best_loss} to", "= model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1,", "as np from optparse import OptionParser import pickle import os import re import", "rpn_reg_loss = rpn_train_step( global_step, x_batch_train, y_batch_train) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True,", "losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss),", "data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C,", "= Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on", "= nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors", "('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1 and", "path for weights. 
If not specified, will try to\" \" load default weights", "y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write training loss and", "('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)", "the settings from the command line, and persist them in the config object", "rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred,", "train_classifier_metric.reset_states() progbar = generic_utils.Progbar(n_valid_steps) # Iterate over the batches of the dataset. for", "to use. One of simple or pascal_voc\", default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number", "f'and can be loaded when testing to ensure correct results') num_imgs = len(train_imgs)", "1 n_steps = num_imgs // BATCH_SIZE n_valid_steps = num_valid_imgs // BATCH_SIZE losses =", "BATCH_SIZE = 1 n_steps = num_imgs // BATCH_SIZE n_valid_steps = num_valid_imgs // BATCH_SIZE", "print( f'\\nMean number of bounding boxes from RPN overlapping ' f'ground truth boxes:", "'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif options.parser == 'simple': from keras_frcnn.simple_parser import get_data", "= options.input_weight_path else: # set the path to weights based on backend and", "roi_input], rpn[:2] + classifier) # Defining optimizers for all models optimizer_rpn = Adam(learning_rate=1e-5)", ":, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples", "train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss,", "= Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for Fast RCNN model train_classifier_metric", "{loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}') print(f'Elapsed time: {time.time() -", "[] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2:", "\" # \"FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f\" % ( #", "rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1 and C.verbose:", "data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='val') if K.image_data_format() == 'channels_first': input_shape_img = (3,", "rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write training loss and accuracy to the tensorboard", "= tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss function of RPN model and Fast", "train_imgs, classes_count, class_mapping = get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path) if 'bg' not", "float(sum(rpn_accuracy_for_epoch_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \") print(", "generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config 
np_config.enable_numpy_behavior() # if Logs path directory exists,", "help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\",", "rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)", "# write training loss and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss,", "used to load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2] +", "sel_samples n_epochs = options.num_epochs BATCH_SIZE = 1 n_steps = num_imgs // BATCH_SIZE n_valid_steps", "to\" \" load default weights provided by keras.\") (options, args) = parser.parse_args() if", "C.base_net_weights = nn.get_weight_path() train_imgs, classes_count, class_mapping = get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path)", "is not producing bounding boxes that overlap the ground truth boxes.' ' Check", "nn C.network = 'resnet50' else: print('Not a valid model') raise ValueError # check", "automatically creates directory and writes logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer = tf.summary.create_file_writer('logs/valid/') @tf.function", "v in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count) print(f'Num classes (including bg) =", "dest=\"network\", help=\"Base network to use. Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment", "{curr_loss}, saving weights') best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) +", "K.image_data_format() == 'channels_first': input_shape_img = (3, None, None) else: input_shape_img = (None, None,", "to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\",", "replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where", "float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation: Average number of overlapping bounding", "{v: k for k, v in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count) print(f'Num", "accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return", "tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from", "tensorboard writer, automatically creates directory and writes logs train_writer = tf.summary.create_file_writer('logs/train/') valid_writer =", "pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist()", "curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total validation loss: %.4f\"", "IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue", "= data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = 
data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,", "data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='val') if K.image_data_format() == 'channels_first': input_shape_img", "keras_frcnn.roi_helpers as roi_helpers from tensorflow.python.keras.utils import generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config np_config.enable_numpy_behavior()", "overlapping bounding boxes ' f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations') if", "' f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')", "3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input, trainable=True) #", "return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape:", "loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(", "[('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps -", "network to use. Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal", "tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss,", "os import re import shutil import tensorflow as tf from tensorflow.keras import backend", "as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss =", "with train_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss,", "frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss losses[step, 1] =", "import pickle import os import re import shutil import tensorflow as tf from", "horizontal flips in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\", help=\"Augment with vertical flips", "tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train): with", "regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}') print(f'Elapsed time: {time.time()", "losses import keras_frcnn.roi_helpers as roi_helpers from tensorflow.python.keras.utils import generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import", "bool(options.rot_90) C.model_path = options.output_weight_path model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output", "= float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage number of overlapping bounding", "+ loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" % curr_loss) start_time =", "y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou", "K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='val') if K.image_data_format() ==", "import tensorflow as tf from tensorflow.keras import backend as K from tensorflow.keras.optimizers import", "num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers, roi_input,", "rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights)", "end of each epoch train_classifier_metric.reset_states() progbar = generic_utils.Progbar(n_valid_steps) # Iterate over the batches", "data_generators from keras_frcnn import losses as losses import keras_frcnn.roi_helpers as roi_helpers from tensorflow.python.keras.utils", "import numpy as np from optparse import OptionParser import pickle import os import", "= rpn_valid_step( global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7,", "started for %d epochs\" % n_epochs) for epoch in range(n_epochs): print(\"\\nStart of epoch", "y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred,", "check if weight path was passed via command line if options.input_weight_path: C.base_net_weights =", "\"--rot_90\", dest=\"rot_90\", help=\"Augment with 90 degree rotations in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--num_epochs\",", "elif options.parser == 'simple': from keras_frcnn.simple_parser import get_data else: raise ValueError(\"Command line option", "fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights))", "# Loss function of RPN model and Fast RCNN model rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors)", "Y2): with tf.GradientTape() as frcnn_tape: rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss =", "RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from", "be used when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\",", "nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors =", "Input from tensorflow.keras.models import Model from keras_frcnn import config, data_generators from keras_frcnn import", "(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\", help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location", "model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss function of RPN model", "0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping inv_map = {v: k for k,", "open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print(f'Config has been written to {config_output_filename}, '", "rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)", "= np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0)", "OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to", "from keras_frcnn.simple_parser import get_data else: raise ValueError(\"Command line option parser must be one", "rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps - 1 and C.verbose:", "Cls Loss: %.4f RPN reg Loss: %.4f \" # \"FRCNN Cls Loss: %.4f", "= frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() # write", "start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total validation loss:", "if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples)", "= Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # this is a model", "rpn_class_loss, rpn_reg_loss = 
rpn_valid_step( global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),", "= len(val_imgs) print(f'Num train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs,", "if 'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping =", "= tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train, x2_tensor,", "tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def", "a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if", ":], tf.float32) y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step(", "= val_classifier_metric.result() # write training loss and accuracy to the tensorboard with valid_writer.as_default():", "' Check RPN settings or keep training.') loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr =", "1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps", "= np.zeros((n_steps, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] valid_losses = np.zeros((n_valid_steps, 5))", "data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(),", "float(sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print( f'\\nMean number of", "roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier)", "not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping inv_map", "rpn_accuracy_for_epoch = [] valid_losses = np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = []", "rpn[:2] + classifier) # Defining optimizers for all models optimizer_rpn = Adam(learning_rate=1e-5) optimizer_classifier", "rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write training loss and accuracy to the tensorboard", "fast_rcnn_class_loss losses[step, 3] = fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls',", "' f'from RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes == 0: print('RPN is not producing", "model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred)", "% ( # step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss), # float(fast_rcnn_reg_loss))) # Reset training metrics", "parser.parse_args() if not options.train_path: # if filename is not given parser.error('Error: path to", "to the tensorboard with valid_writer.as_default(): 
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc,", "start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\"", "y2_tensor) losses[step, 0] = rpn_class_loss losses[step, 1] = rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss", "the extreme case where num_rois = 1, we pick a random pos or", "= model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write", "= bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 = bool(options.rot_90) C.model_path = options.output_weight_path model_path_regex =", "rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation: Average number of overlapping bounding boxes ' f'from RPN", "default=False) parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\", help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store", "training (to be used when testing).\", default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\",", "help=\"Base network to use. Supports vgg or resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with", "from tensorflow.keras import backend as K from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.layers", "= np.mean(valid_losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if", "> 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois", "import pprint import sys import time import numpy as np from optparse import", "rcnn_reg_pred = model_classifier([x_batch_train, X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)", "{loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss Detector regression: {loss_class_regr}')", "default=\"config.pickle\") parser.add_option(\"--output_weight_path\", dest=\"output_weight_path\", help=\"Output path for weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for", "is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:,", "fast_rcnn_reg_loss)]) if step == n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) )", "fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1] ==", "{config_output_filename}, ' f'and can be loaded when testing to ensure correct results') num_imgs", "resnet50.\", default='resnet50') parser.add_option(\"--hf\", dest=\"horizontal_flips\", help=\"Augment with horizontal flips in training. 
(Default=false).\", action=\"store_true\", default=False)", "metrics at the end of each epoch train_classifier_metric.reset_states() progbar = generic_utils.Progbar(n_valid_steps) # Iterate", "rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write training loss", "fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss losses[step,", "y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to", "one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step( global_step, x_batch_train, y_batch_train) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred,", "+ loss_class_cls + loss_class_regr print(\"Total validation loss: %.4f\" % curr_loss) start_time = time.time()", "len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples", "- len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme", "overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1,", "fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls',", "f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss", "tf.add(global_step, one_step) y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step( global_step, x_batch_train, y_batch_train) R =", "= {v: k for k, v in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count)", "weights provided by keras.\") (options, args) = parser.parse_args() if not options.train_path: # if", "given parser.error('Error: path to training data must be specified. Pass --path to command", "raise ValueError(\"Command line option parser must be one of 'pascal_voc' or 'simple'\") #", "to process at once.\", default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base network to use. Supports vgg", "weights.\", default='./model_frcnn.hdf5') parser.add_option(\"--input_weight_path\", dest=\"input_weight_path\", help=\"Input path for weights. 
If not specified, will try", "sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step( global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor)", "type=\"int\", dest=\"num_epochs\", help=\"Number of epochs.\", default=2000) parser.add_option(\"--config_filename\", dest=\"config_filename\", help=\"Location to store all the", "as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True) rpn_class_loss =", "parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation", "2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) /", "import vgg as nn elif options.network == 'resnet50': from keras_frcnn import resnet as", "else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois", "np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) return sel_samples n_epochs =", "validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. One of simple or pascal_voc\",", "val_imgs, _, _ = get_data(options.valid_path) if 'bg' not in classes_count: classes_count['bg'] = 0", "// 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist()", "== 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0:", "Loss: %.4f FRCNN reg Loss: %.4f\" % ( # step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss),", "load default weights provided by keras.\") (options, args) = parser.parse_args() if not options.train_path:", "rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true,", "classes (including bg) = {len(classes_count)}') config_output_filename = options.config_filename with open(config_output_filename, 'wb') as config_f:", "loss and accuracy to the tensorboard with train_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss,", "C.network = 'vgg' from keras_frcnn import vgg as nn elif options.network == 'resnet50':", "else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples", "= {mean_overlapping_bboxes}') if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes that", "class_mapping inv_map = {v: k for k, v in class_mapping.items()} print('Training images per", "return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch): neg_samples = np.where(Y1[0, :, -1]", "model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break # # Log every", "if step == n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) ) /", "(None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input,", "step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) 
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def", "# # Log every 10 steps. # if step % 10 == 0:", "fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_valid_steps - 1 and C.verbose: mean_overlapping_bboxes =", "= fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() #", "float(sum(rpn_accuracy_rpn_monitor) ) / len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage number of overlapping bounding boxes", "/ len(rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print(f'\\nAverage number of overlapping bounding boxes ' f'from", "Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0)", "selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois -", "if weight path was passed via command line if options.input_weight_path: C.base_net_weights = options.input_weight_path", "boxes ' f'from RPN = {mean_overlapping_bboxes}') if mean_overlapping_bboxes == 0: print('RPN is not", "{len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train')", "f'Mean number of bounding boxes from RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}')", "= options.output_weight_path model_path_regex = re.match(\"^(.+)(\\.hdf5)$\", C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights must", "= loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total Loss: %.4f\" % curr_loss)", "store all the metadata related to \" \"the training (to be used when", "Adam(learning_rate=1e-5) optimizer_classifier = Adam(learning_rate=1e-5) optimizer_all = SGD(learning_rate=0.01) # Accuracy metrics for Fast RCNN", "dataset. 
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val): y_rpn_cls_true, y_rpn_regr_true = y_batch_val y_rpn_cls_pred,", "time: {time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr", "print(\"Validation Metrics: \") print( f'Mean number of bounding boxes from RPN overlapping '", "write training loss and accuracy to the tensorboard with valid_writer.as_default(): tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)", "tf.convert_to_tensor(0, tf.int64) one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d epochs\" % n_epochs)", "path to weights based on backend and model C.base_net_weights = nn.get_weight_path() train_imgs, classes_count,", "= rpn_class_loss losses[step, 1] = rpn_reg_loss losses[step, 2] = fast_rcnn_class_loss losses[step, 3] =", "Fast RCNN model rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss() fast_rcnn_reg_loss_fn", "= fast_rcnn_reg_loss losses[step, 4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss),", "in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping inv_map =", "rpn_class_loss, rpn_reg_loss @tf.function def frcnn_train_step(step, x_batch_train, X2, Y1, Y2): with tf.GradientTape() as frcnn_tape:", "best_loss = curr_loss model_all.save_weights(model_path_regex.group(1) + \"_\" + '{:04d}'.format( epoch) + model_path_regex.group(2)) break #", "C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid,", "rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss], model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write", "help=\"Location to store all the metadata related to \" \"the training (to be", "can be loaded when testing to ensure correct results') num_imgs = len(train_imgs) num_valid_imgs", "selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1,", "options.num_epochs BATCH_SIZE = 1 n_steps = num_imgs // BATCH_SIZE n_valid_steps = num_valid_imgs //", "# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS", "global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor) valid_losses[step, 0] = rpn_class_loss valid_losses[step, 1] = rpn_reg_loss", "x_batch_train, y_batch_train): with tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred =", "2]) loss_class_regr = np.mean(valid_losses[:, 3]) class_acc = np.mean(valid_losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid) )", "default=\"pascal_voc\") parser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\", help=\"Number of RoIs to process at once.\", default=32)", "classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping inv_map = {v:", "samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(),", "dest=\"input_weight_path\", help=\"Input path for weights. 
If not specified, will try to\" \" load", "neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) return sel_samples", ") / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \") print( f'Mean", "config.Config() C.use_horizontal_flips = bool(options.horizontal_flips) C.use_vertical_flips = bool(options.vertical_flips) C.rot_90 = bool(options.rot_90) C.model_path = options.output_weight_path", "if options.network == 'vgg': C.network = 'vgg' from keras_frcnn import vgg as nn", "num_rois = 1, we pick a random pos or neg sample selected_pos_samples =", "1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps", "x_batch_train, y_batch_train) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note:", "rpn_accuracy_for_epoch.append(0) continue sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)", "classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='val') if K.image_data_format() == 'channels_first': input_shape_img = (3, None,", "tf.int64) one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d epochs\" % n_epochs) for", "try to\" \" load default weights provided by keras.\") (options, args) = parser.parse_args()", "# Accuracy metrics for Fast RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric = tf.keras.metrics.CategoricalAccuracy()", "if C.verbose: print(\"Validation Metrics: \") print( f'Mean number of bounding boxes from RPN", "90 degree rotations in training. 
(Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--num_epochs\", type=\"int\", dest=\"num_epochs\", help=\"Number of", "fast_rcnn_reg_loss)]) if step == n_steps - 1 and C.verbose: mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor) )", "number of bounding boxes from RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier", "the tensorboard with valid_writer.as_default(): tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step) tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step) return y_rpn_cls_pred, y_rpn_regr_pred,", "return sel_samples n_epochs = options.num_epochs BATCH_SIZE = 1 n_steps = num_imgs // BATCH_SIZE", "C.model_path) if model_path_regex.group(2) != '.hdf5': print('Output weights must have .hdf5 filetype') exit(1) C.num_rois", "fast_rcnn_class_loss), ('detector_regr', fast_rcnn_reg_loss)]) if step == n_steps - 1 and C.verbose: mean_overlapping_bboxes =", "Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN,", "- start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total validation", "len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print( f'\\nMean number of bounding boxes", "= SGD(learning_rate=0.01) # Accuracy metrics for Fast RCNN model train_classifier_metric = tf.keras.metrics.CategoricalAccuracy() val_classifier_metric", "best_loss = np.Inf start_time = time.time() class_mapping_inv = {v: k for k, v", "nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2])", "rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() # write training loss and accuracy to", "rpn_class_loss valid_losses[step, 1] = rpn_reg_loss valid_losses[step, 2] = fast_rcnn_class_loss valid_losses[step, 3] = fast_rcnn_reg_loss", "np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch", "# Log every 10 steps. # if step % 10 == 0: #", "val_classifier_metric = tf.keras.metrics.CategoricalAccuracy() # Loss function of RPN model and Fast RCNN model", "rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2], training=True) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2,", "from RPN overlapping ' f'ground truth boxes: {mean_overlapping_bboxes}') print(f'Classifier accuracy for bounding boxes", "C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) #", "to training data must be specified. Pass --path to command line') if options.parser", "if not options.train_path: # if filename is not given parser.error('Error: path to training", "rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :],", "boxes that overlap the ground truth boxes.' 
' Check RPN settings or keep", "4] = fast_rcnn_class_acc progbar.update(step + 1, [('rpn_cls', rpn_class_loss), ('rpn_regr', rpn_reg_loss), ('detector_cls', fast_rcnn_class_loss), ('detector_regr',", "Check RPN settings or keep training.') loss_rpn_cls = np.mean(valid_losses[:, 0]) loss_rpn_regr = np.mean(valid_losses[:,", "print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss RPN regression: {loss_rpn_regr}') print(f'Loss Detector classifier: {loss_class_cls}') print(f'Loss", "len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \") print( f'Mean number of", "extreme case where num_rois = 1, we pick a random pos or neg", "classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_data_format(), mode='val')", "Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0)", "for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # Defining optimizers", "step=step) return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss @tf.function def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2):", "y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True) rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true,", "fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor) losses[step, 0] = rpn_class_loss", "have .hdf5 filetype') exit(1) C.num_rois = int(options.num_rois) if options.network == 'vgg': C.network =", "> 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0:", "roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor_valid.append(0) rpn_accuracy_for_epoch_valid.append(0) continue sel_samples =", "< C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois //", "= np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time", "val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = val_classifier_metric.result() # write training loss and accuracy to the", "get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path) if 'bg' not in classes_count: classes_count['bg'] =", "fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss], model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred)", "fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc @tf.function def rpn_valid_step(step, x_batch_train, y_batch_train): with tf.GradientTape()", "get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid, rpn_accuracy_for_epoch_valid) x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32) y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples,", "{time.time() - start_time}') curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr print(\"Total", "options.network == 'resnet50': from keras_frcnn import resnet as nn C.network = 'resnet50' else:", "if np.random.randint(0, 
2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) return sel_samples n_epochs", "classes_count, class_mapping = get_data(options.train_path) val_imgs, _, _ = get_data(options.valid_path) if 'bg' not in", "rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val) R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(), use_regr=True,", "rpn_accuracy_rpn_monitor_valid = [] rpn_accuracy_for_epoch_valid = [] best_loss = np.Inf start_time = time.time() class_mapping_inv", "random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0,", "loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc", "mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_rpn_monitor_valid = [] print(f'\\nValidation: Average number of", "[] if C.verbose: print( f'\\nMean number of bounding boxes from RPN overlapping '", "= random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) return sel_samples n_epochs = options.num_epochs BATCH_SIZE =", "pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples =", "y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32) fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step( global_step, x_batch_train,", "train samples {len(train_imgs)}') print(f'Num val samples {len(val_imgs)}') data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length,", "2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist() try:", "training data.\") parser.add_option(\"-v\", \"--valid_path\", dest=\"valid_path\", help=\"Path to validation data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser", "tf.keras.metrics.CategoricalAccuracy() # Loss function of RPN model and Fast RCNN model rpn_class_loss_fn =", "tf.GradientTape() as rpn_tape: y_rpn_cls_true, y_rpn_regr_true = y_batch_train y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False) rpn_class_loss", "for epoch in range(n_epochs): print(\"\\nStart of epoch %d\" % (epoch + 1,)) progbar", "model_rpn.trainable_weights) optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights)) # write training loss and accuracy to the tensorboard with", "num_anchors) classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier", "K.image_data_format(), mode='val') if K.image_data_format() == 'channels_first': input_shape_img = (3, None, None) else: input_shape_img", "options.input_weight_path else: # set the path to weights based on backend and model", "print(\"\\nStart of epoch %d\" % (epoch + 1,)) progbar = generic_utils.Progbar(n_steps) # Iterate", "X2], training=False) fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred) fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred) val_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc", "= neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) return", "= [] valid_losses = np.zeros((n_valid_steps, 5)) rpn_accuracy_rpn_monitor_valid = [] 
rpn_accuracy_for_epoch_valid = [] best_loss", "flips in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--rot\", \"--rot_90\", dest=\"rot_90\", help=\"Augment with 90 degree", "not producing bounding boxes that overlap the ground truth boxes.' ' Check RPN", "shutil.rmtree('logs') parser = OptionParser() parser.add_option(\"-p\", \"--path\", dest=\"train_path\", help=\"Path to training data.\") parser.add_option(\"-v\", \"--valid_path\",", "pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples =", "model_classifier.trainable_weights) optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights)) train_classifier_metric.update_state(Y1, rcnn_class_pred) fast_rcnn_class_acc = train_classifier_metric.result() # write training loss and", "RPN model and Fast RCNN model rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors) rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors) fast_rcnn_class_loss_fn", "fast_rcnn_class_loss, step=step) tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step) tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step) return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc def", "model_path_regex.group(2)) break # # Log every 10 steps. # if step % 10", "rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred) rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred) # write training loss and", "= float(sum(rpn_accuracy_for_epoch_valid) ) / len(rpn_accuracy_for_epoch_valid) rpn_accuracy_for_epoch_valid = [] if C.verbose: print(\"Validation Metrics: \")", "one_step = tf.convert_to_tensor(1, tf.int64) print(\"Training started for %d epochs\" % n_epochs) for epoch", "data.\") parser.add_option(\"-o\", \"--parser\", dest=\"parser\", help=\"Parser to use. One of simple or pascal_voc\", default=\"pascal_voc\")", "from tensorflow.python.keras.utils import generic_utils sys.setrecursionlimit(40000) from tensorflow.python.ops.numpy_ops import np_config np_config.enable_numpy_behavior() # if Logs", "2, replace=False).tolist() try: selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples =", "[] best_loss = np.Inf start_time = time.time() class_mapping_inv = {v: k for k,", "rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print( f'\\nMean number of bounding boxes from", "in training. (Default=false).\", action=\"store_true\", default=False) parser.add_option(\"--vf\", dest=\"vertical_flips\", help=\"Augment with vertical flips in training.", "print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}') print(f'Loss RPN classifier: {loss_rpn_cls}') print(f'Loss", "y_rpn_regr_true = y_batch_val y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step( global_step, x_batch_val, y_batch_val) R", "selected_neg_samples else: # in the extreme case where num_rois = 1, we pick", "process at once.\", default=32) parser.add_option(\"--network\", dest=\"network\", help=\"Base network to use. 
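The sampling helper can be reassembled almost verbatim from the fragments; the sketch below is a self-contained plain-NumPy version (the monitor-list bookkeeping is dropped, and it assumes each image yields at least one negative proposal).

import numpy as np

def get_selected_samples(Y1, num_rois=32):
    # Last column of the one-hot detector target is the 'bg' class, so a 1
    # there marks a negative (background) proposal and a 0 a positive one.
    neg_samples = np.where(Y1[0, :, -1] == 1)[0]
    pos_samples = np.where(Y1[0, :, -1] == 0)[0]

    if num_rois > 1:
        # Take up to half the RoIs from the positives, the rest from the
        # negatives; fall back to sampling with replacement if there are not
        # enough distinct negatives to fill the batch.
        if len(pos_samples) < num_rois // 2:
            selected_pos = pos_samples.tolist()
        else:
            selected_pos = np.random.choice(
                pos_samples, num_rois // 2, replace=False).tolist()
        try:
            selected_neg = np.random.choice(
                neg_samples, num_rois - len(selected_pos),
                replace=False).tolist()
        except ValueError:
            selected_neg = np.random.choice(
                neg_samples, num_rois - len(selected_pos),
                replace=True).tolist()
        return selected_pos + selected_neg

    # Extreme case num_rois == 1: pick a single random positive or negative.
    if np.random.randint(0, 2):
        return [int(np.random.choice(neg_samples))]
    return [int(np.random.choice(pos_samples))]

Keeping roughly half of the selected RoIs positive prevents the detector head from being dominated by background proposals.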
The remaining fragments of the script cover the training and validation loops:

- Anchor-target generators come from data_generators.get_anchor_gt for the train and validation sets, with BATCH_SIZE = 1, n_steps = num_imgs // BATCH_SIZE and n_valid_steps = num_valid_imgs // BATCH_SIZE; each epoch drives a generic_utils.Progbar over those steps.
- rpn_train_step is a @tf.function that runs model_rpn under tf.GradientTape, computes rpn_class_loss and rpn_reg_loss, applies the gradients with optimizer_rpn and writes both scalars to the train summary writer. frcnn_train_step does the same for model_classifier with fast_rcnn_class_loss and fast_rcnn_reg_loss plus a CategoricalAccuracy update. rpn_valid_step and frcnn_valid_step repeat the computation with training=False and no gradient step, logging to the valid writer.
- Between the two steps, roi_helpers.rpn_to_roi turns the RPN output into proposals (use_regr=True, overlap_thresh=0.7, max_boxes=300) and roi_helpers.calc_iou matches them to ground truth, converting from (x1,y1,x2,y2) to (x,y,w,h); when no proposal matches, the image is skipped and a 0 is appended to the RPN accuracy monitors, otherwise get_selected_samples chooses the RoIs fed to the detector head.
- At the end of each epoch the script reports the mean number of RPN proposals overlapping ground truth (warning, when it is 0, that the RPN settings may need attention or more training), the four mean losses, the detector accuracy and the elapsed time, then repeats the same bookkeeping over the validation generator; whenever the summed loss improves on best_loss, model_all weights are saved to an epoch-numbered .hdf5 file and the validation accuracy metric is reset. A commented-out block would print the per-step losses every 10 steps.
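The step functions follow the standard TF2 custom-training pattern: a forward pass under tf.GradientTape, a gradient update, then tf.summary scalars. The sketch below reproduces that structure with a stand-in convolutional model and generic Keras losses in place of the keras_frcnn RPN and its loss classes, so it runs on its own but only illustrates the pattern, not the script's actual rpn_train_step.

import tensorflow as tf

# Stand-ins for model_rpn, its two loss functions and the optimizer; the real
# script builds these from keras_frcnn instead.
model_rpn = tf.keras.Sequential(
    [tf.keras.layers.Conv2D(4, 3, padding="same", input_shape=(64, 64, 3))])
optimizer_rpn = tf.keras.optimizers.Adam(learning_rate=1e-5)
rpn_class_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
rpn_reg_loss_fn = tf.keras.losses.Huber()
train_writer = tf.summary.create_file_writer("logs/train/")


@tf.function
def rpn_train_step(step, x_batch, y_cls_true, y_regr_true):
    with tf.GradientTape() as tape:
        y_pred = model_rpn(x_batch, training=True)
        cls_loss = rpn_class_loss_fn(y_cls_true, y_pred)
        reg_loss = rpn_reg_loss_fn(y_regr_true, y_pred)
    # Gradients of the summed losses, applied to the RPN weights only.
    grads = tape.gradient([cls_loss, reg_loss], model_rpn.trainable_weights)
    optimizer_rpn.apply_gradients(zip(grads, model_rpn.trainable_weights))
    # Scalars for TensorBoard, keyed by the global step.
    with train_writer.as_default():
        tf.summary.scalar("rpn_class_loss", cls_loss, step=step)
        tf.summary.scalar("rpn_reg_loss", reg_loss, step=step)
    return cls_loss, reg_loss


# Smoke test with random tensors; the global step is an int64 tensor, as in
# the script.
step = tf.constant(0, dtype=tf.int64)
x = tf.random.normal([1, 64, 64, 3])
cls_true = tf.cast(tf.random.uniform([1, 64, 64, 4]) > 0.5, tf.float32)
regr_true = tf.random.normal([1, 64, 64, 4])
rpn_train_step(step, x, cls_true, regr_true)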
[ "from typing import Optional, Any, List, Dict api_key = '' python_version = sys.version_info", "user) -> Optional[UserProtocol]: if request.param == \"no-user\": return None elif request.param == \"no-customer-id\":", "url=stripe_unsubscribed_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1)", "str: return \"http://localhost/cancel\" @pytest.fixture(scope=\"session\") def payment_method_types() -> List[str]: return [\"card\"] @pytest.fixture def user_email()", "limit=1) if prices: price = prices.data[0] else: price = stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\":", "-> stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id,", "def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, stripe_price_currency) -> List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\":", "'licensed'}, 'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal':", "def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param == \"no-user\": return None elif request.param", "subscription_current_period_end) -> List: return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name,", "customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"])", "'type': 'service', 'name': subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {}, 'prices':", "'' python_version = sys.version_info ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\", action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\")", "subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request, user) ->", "stripe_unsubscribed_product_url() -> str: return \"http://localhost/second_paywall\" @pytest.fixture(scope=\"session\", autouse=True) def setup_stripe(pytestconfig): stripe.api_key = pytestconfig.getoption(\"apikey\") @pytest.fixture(scope=\"session\")", "'interval_count': 1, 'trial_period_days': None, 'usage_type': 'licensed'}, 'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},", "UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return user @pytest.fixture def user_with_customer_id(user,", "url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name()", "None, 'current_period_end': None}}] @pytest.fixture def expected_subscription_products_and_prices(stripe_subscription_product_id, 
stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url,", "= \"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id,", "test runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request, user) -> UserProtocol: if", "'nickname': None, 'metadata': {}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': subscription_id, 'cancel_at': None, 'current_period_end': subscription_current_period_end}}]", "'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end':", "os import sys import pytest import stripe from stripe.error import InvalidRequestError from datetime", "[{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {}, 'nickname': None, 'recurring': {'aggregate_usage': None, 'interval': 'year',", "@pytest.fixture def non_existing_subscription_id() -> str: return \"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() -> str: return", "@pytest.fixture def subscription_id(subscription): return subscription['id'] @pytest.fixture def subscription_current_period_end(subscription): return subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id,", "@pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str: return", "{}, 'prices': [{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\":", "product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\"", "subscription_current_period_end) -> List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\",", "User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try: subscriptions.delete_customer(user) except InvalidRequestError: pass @pytest.fixture(params=[None, \"user\"])", "test runner user\") return user @pytest.fixture def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture", "payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture", "@pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" 
return", "sys import pytest import stripe from stripe.error import InvalidRequestError from datetime import datetime,", "\"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param == \"no-user\": return None", "None, 'current_period_end': None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None,", "stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id':", "@pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id) @pytest.fixture def non_existing_payment_method_id()", "stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [ {'id': stripe_unsubscribed_product_id, 'images': [],", "'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name':", "'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}], 'subscription_info':", "Any, List, Dict api_key = '' python_version = sys.version_info ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def", "@pytest.fixture def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return", "-> List: return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name, 'prices':", "'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id':", "stripe_subscription_product_id, 'subscription_info': {'sub_id': subscription_id, 'cancel_at': None, 'current_period_end': subscription_current_period_end}}] @pytest.fixture def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, stripe_price_currency)", "-> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1) if prices: price = prices.data[0] else:", "import UserProtocol, User from typing import Optional, Any, List, Dict api_key = ''", "'service', 'name': subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {}, 'prices': [{'id':", "return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() ->", "\"usage_type\": \"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None,", "unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id, ) return price['id'] @pytest.fixture def subscription_id(subscription): return subscription['id']", "@pytest.fixture def subscription_current_period_end(subscription): return 
subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) ->", "'current_period_end': None}}] @pytest.fixture def expected_subscription_products_and_prices(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency,", "stripe.Product.create(name=unsubscribed_product_name, url=stripe_unsubscribed_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True,", "subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [", "@pytest.fixture(params=[None, \"user\"]) def none_or_user(request, user) -> Optional[UserProtocol]: if not request.param: return None return", "-> UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return user @pytest.fixture def", "'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name': subscribed_product_name,", "stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer,", "user @pytest.fixture def wrong_customer_id() -> UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' )", "subscribed_product_name) -> str: products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1) if products: product = products['data'][0]", "'129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info':", "\"http://localhost/cancel\" @pytest.fixture(scope=\"session\") def payment_method_types() -> List[str]: return [\"card\"] @pytest.fixture def user_email() -> str:", "\"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() -> str: return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) ->", "'nickname': None, 'metadata': {}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}]", "'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id,", "{}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end,", "stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1) if products: product = products['data'][0] else: product = stripe.Product.create(name=unsubscribed_product_name, 
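Those user fixtures can be reassembled almost verbatim from the fragments; the sketch below shows the base user fixture with its teardown and the three-way parametrized variant. It assumes a user_email fixture and the subscriptions package API (User, create_customer, delete_customer) exactly as they appear in the fragments.

from typing import Optional

import pytest
from stripe.error import InvalidRequestError

import subscriptions
from subscriptions import User, UserProtocol


@pytest.fixture
def user(user_email) -> UserProtocol:
    # Fresh local user for every test; the Stripe customer (if one was
    # created) is removed again on teardown.
    user = User(user_id=1, email=user_email)
    yield user
    if user.stripe_customer_id:
        try:
            subscriptions.delete_customer(user)
        except InvalidRequestError:
            pass


@pytest.fixture(params=["no-user", "no-customer-id", "with-customer-id"])
def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]:
    # Runs the requesting test three times: with no user at all, with a user
    # that has no Stripe customer yet, and with a user whose customer exists.
    if request.param == "no-user":
        return None
    elif request.param == "no-customer-id":
        return user
    subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
    return user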
The remaining conftest fragments cover products, prices, payment methods, subscriptions and the expected results the tests assert against:

- stripe_subscription_product_id and stripe_unsubscribed_product_id are session-scoped create-or-reuse fixtures: they look up an active Product by URL with stripe.Product.list(url=..., active=True, limit=1) and only call stripe.Product.create(name=..., url=...) when nothing is found. stripe_price_id and stripe_unsubscribed_price_id do the same with stripe.Price.list(product=..., active=True, limit=1), the unsubscribed price being created with unit_amount=9999, currency="usd" and a yearly recurring interval.
- payment_method_for_customer and default_payment_method_for_customer delegate to subscriptions.tests.create_payment_method_for_customer and create_default_payment_method_for_customer; the *_saved variants patch in the customer id and set card.checks.cvc_check to "pass". subscription calls subscriptions.create_subscription(user_with_customer_id, stripe_price_id), and subscription_id / subscription_current_period_end expose its fields. non_existing_payment_method_id and non_existing_subscription_id return well-formed but unknown ids ("pm_ABCDEFGH123456", "sub_ABCDEFGH123456").
- expected_subscription_prices, expected_subscription_prices_unsubscribed, expected_subscription_products_and_prices and expected_subscription_products_and_prices_unsubscribed build the dictionaries the tests compare against: price entries with recurring metadata (monthly, licensed usage for the subscribed price with unit_amount 129; yearly for the unsubscribed one with unit_amount 9999) and a subscription_info block whose sub_id, current_period_end and cancel_at are filled from the live subscription in the subscribed variants and left None otherwise.
stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {}, 'nickname': None,", "'129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}], 'subscription_info': {'sub_id':", "timedelta import subscriptions from subscriptions import UserProtocol, User from typing import Optional, Any,", "user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param == \"no-user\":", "runner user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if", "'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': subscription_id, 'cancel_at': None, 'current_period_end': subscription_current_period_end}}] @pytest.fixture def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,", "stripe_price_currency) -> List: return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name,", "stripe.Customer.list(email=user_email) for customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user", "-> UserProtocol: if request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\")", "subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {}, 'prices': [{'id': stripe_price_id, 'recurring':", "UserProtocol: user = User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try: subscriptions.delete_customer(user) except InvalidRequestError:", "stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if prices: price = prices.data[0]", "def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str: return 'Silver'", "'name': unsubscribed_product_name, 'prices': [{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {}, 'nickname': None, 'recurring': {'aggregate_usage':", "stripe.api_key = pytestconfig.getoption(\"apikey\") @pytest.fixture(scope=\"session\") def checkout_success_url() -> str: return \"http://localhost\" @pytest.fixture(scope=\"session\") def checkout_cancel_url()", "'id': stripe_unsubscribed_price_id, 'metadata': {}, 'nickname': None, 'recurring': {'aggregate_usage': None, 'interval': 'year', 'interval_count': 1,", "@pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod:", "wrong_customer_id() -> UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return user @pytest.fixture", "'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}],", "-> stripe.PaymentMethod: return 
subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod: payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id", "def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1) if prices: price =", "def user_with_and_without_customer_id(request, user) -> UserProtocol: if request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions", "'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\", },", "1, \"trial_period_days\": None, \"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal':", "return \"pm_ABCDEFGH123456\" @pytest.fixture def non_existing_subscription_id() -> str: return \"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() ->", "expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [ {'id': stripe_price_id, 'recurring': {", "-> str: return \"http://localhost/cancel\" @pytest.fixture(scope=\"session\") def payment_method_types() -> List[str]: return [\"card\"] @pytest.fixture def", "'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},", "@pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod: payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return", "user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param", "if not request.param: return None return user @pytest.fixture def wrong_customer_id() -> UserProtocol: user", "\"pass\" return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check']", "{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\":", "product=stripe_unsubscribed_product_id, ) return price['id'] @pytest.fixture def subscription_id(subscription): return subscription['id'] @pytest.fixture def subscription_current_period_end(subscription): return", "str: return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str: products = stripe.Product.list(url=stripe_subscription_product_url, active=True,", "@pytest.fixture def non_existing_payment_method_id() -> str: return \"pm_ABCDEFGH123456\" @pytest.fixture def non_existing_subscription_id() -> str: return", "user if user.stripe_customer_id: try: subscriptions.delete_customer(user) except InvalidRequestError: pass 
@pytest.fixture(params=[None, \"user\"]) def none_or_user(request, user)", "return subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [", "'current_period_end': None, 'sub_id': None}, 'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images':", "'prices': [{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\": None,", "Dict api_key = '' python_version = sys.version_info ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\",", "'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str: products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1) if", "\"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\", }, 'type': 'recurring',", "'current_period_end': None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info':", "None}, 'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images': [], 'type': 'service',", "stripe.Price.create( unit_amount=129, currency=\"usd\", recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id, ) return price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) ->", "subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}} ] @pytest.fixture def expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name,", "= user_with_customer_id.stripe_customer_id payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod:", "import datetime, timedelta import subscriptions from subscriptions import UserProtocol, User from typing import", "\"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture def payment_method_for_customer(user_with_customer_id)", "stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1) if products: product = products['data'][0] else: product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url)", "str: return f'<EMAIL>-{ci_<EMAIL>' @pytest.fixture def user(user_email) -> UserProtocol: user = User(user_id=1, email=user_email) yield", "UserProtocol: customers = stripe.Customer.list(email=user_email) for customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner", "-> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id) @pytest.fixture def non_existing_payment_method_id() -> str: return \"pm_ABCDEFGH123456\" @pytest.fixture", "product['id'] 
@pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if prices:", "{}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}] @pytest.fixture def expected_subscription_products_and_prices(stripe_subscription_product_id,", "user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request, user) -> UserProtocol: if request.param ==", "{}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}], 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}", "datetime import datetime, timedelta import subscriptions from subscriptions import UserProtocol, User from typing", "runner user\") return user @pytest.fixture def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def", "if prices: price = prices.data[0] else: price = stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"},", "\"month\"}, product=stripe_subscription_product_id, ) return price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id,", "request.param: return None return user @pytest.fixture def wrong_customer_id() -> UserProtocol: user = User(", "stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\":", "List: return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name, 'prices': [{'currency':", "= stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1) if products: product = products['data'][0] else: product = stripe.Product.create(name=unsubscribed_product_name,", "\"trial_period_days\": None, \"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129',", "-> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if prices: price = prices.data[0] else:", "for customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-customer-id\",", "expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, stripe_price_currency) -> List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None,", "List[str]: return [\"card\"] @pytest.fixture def user_email() -> str: return f'<EMAIL>-{ci_<EMAIL>' @pytest.fixture def user(user_email)", "price = stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id, ) return price['id'] @pytest.fixture def", "'name': subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {}, 'prices': [{'id': stripe_price_id,", "request.param == \"no-user\": return None elif request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions", "return 
\"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() -> str: return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name)", "'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info': {'cancel_at': None,", "= products['data'][0] else: product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() ->", "stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id) @pytest.fixture def non_existing_payment_method_id() -> str: return \"pm_ABCDEFGH123456\" @pytest.fixture def", "str: return 'Silver' @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str: products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True,", "= stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1) if prices: price = prices.data[0] else: price = stripe.Price.create(", "= stripe.Price.create( unit_amount=129, currency=\"usd\", recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id, ) return price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id)", "@pytest.fixture def user_email() -> str: return f'<EMAIL>-{ci_<EMAIL>' @pytest.fixture def user(user_email) -> UserProtocol: user", "except InvalidRequestError: pass @pytest.fixture(params=[None, \"user\"]) def none_or_user(request, user) -> Optional[UserProtocol]: if not request.param:", "= stripe.Customer.list(email=user_email) for customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return", "setup_stripe(pytestconfig): stripe.api_key = pytestconfig.getoption(\"apikey\") @pytest.fixture(scope=\"session\") def checkout_success_url() -> str: return \"http://localhost\" @pytest.fixture(scope=\"session\") def", "[], 'type': 'service', 'name': subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {},", "-> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def", "stripe.PaymentMethod: payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer)", "'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name': subscribed_product_name, 'shippable': None, 'unit_label':", "subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}} ] @pytest.fixture def", "no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param == \"no-user\": return None elif request.param ==", "product 
= products['data'][0] else: product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency()", "subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [ {'id':", "{'sub_id': subscription_id, 'cancel_at': None, 'current_period_end': subscription_current_period_end}}] @pytest.fixture def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, stripe_price_currency) -> List:", "@pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str: products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1) if products:", "subscribed_product_name() -> str: return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str: products =", "'cancel_at': None, 'current_period_end': None}}] @pytest.fixture def expected_subscription_products_and_prices(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url,", "default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id) @pytest.fixture def non_existing_payment_method_id() -> str: return", "pytest import stripe from stripe.error import InvalidRequestError from datetime import datetime, timedelta import", "return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name, 'prices': [{'currency': stripe_price_currency,", "subscription_id, subscription_current_period_end) -> List: return [ {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name':", "user(user_email) -> UserProtocol: user = User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try: subscriptions.delete_customer(user)", "return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str: products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1)", "InvalidRequestError: pass @pytest.fixture(params=[None, \"user\"]) def none_or_user(request, user) -> Optional[UserProtocol]: if not request.param: return", "2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return user @pytest.fixture def user_with_customer_id(user, user_email) -> UserProtocol: customers", "129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at':", "def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str: products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1) if products: product", "default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\") def stripe_subscription_product_url() -> str: return \"http://localhost/paywall\" @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_url() -> str:", "subscription['id'] @pytest.fixture def subscription_current_period_end(subscription): 
return subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end)", "pytestconfig.getoption(\"apikey\") @pytest.fixture(scope=\"session\") def checkout_success_url() -> str: return \"http://localhost\" @pytest.fixture(scope=\"session\") def checkout_cancel_url() -> str:", "{'cancel_at': None, 'current_period_end': None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable':", "'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end,", "{'cancel_at': None, 'current_period_end': None, 'sub_id': None}, 'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id':", "active=True, limit=1) if prices: price = prices.data[0] else: price = stripe.Price.create( unit_amount=9999, currency=\"usd\",", "\"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id)", "description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request, user) -> UserProtocol:", "User from typing import Optional, Any, List, Dict api_key = '' python_version =", "test runner user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]:", "stripe_subscription_product_id, 'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}] @pytest.fixture def expected_subscription_products_and_prices(stripe_subscription_product_id, stripe_price_id, subscribed_product_name,", "from subscriptions import UserProtocol, User from typing import Optional, Any, List, Dict api_key", "return None elif request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\")", "= '' python_version = sys.version_info ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\", action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY'))", "limit=1) if products: product = products['data'][0] else: product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id']", "@pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1) if prices: price", "-> UserProtocol: user = User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try: subscriptions.delete_customer(user) except", "return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] =", "none_or_user(request, user) -> Optional[UserProtocol]: if not request.param: return None return 
user @pytest.fixture def", "None, 'recurring': {'aggregate_usage': None, 'interval': 'year', 'interval_count': 1, 'trial_period_days': None, 'usage_type': 'licensed'}, 'subscription_info':", "\"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount':", "-> stripe.PaymentMethod: payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id,", "= User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try: subscriptions.delete_customer(user) except InvalidRequestError: pass @pytest.fixture(params=[None,", "price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1) if prices:", "'metadata': {}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}] @pytest.fixture def", "'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}],", "typing import Optional, Any, List, Dict api_key = '' python_version = sys.version_info ci_string", "@pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str: return 'Silver' @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str:", "def none_or_user(request, user) -> Optional[UserProtocol]: if not request.param: return None return user @pytest.fixture", "runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request, user) -> UserProtocol: if request.param", "-> str: return 'Silver' @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str: products = stripe.Product.list(url=stripe_unsubscribed_product_url,", "-> List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\":", "None, 'url': stripe_subscription_product_url, 'metadata': {}, 'prices': [{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\":", "\"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None,", "@pytest.fixture def wrong_customer_id() -> UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return", "default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id)", "product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str:", "import sys import pytest import stripe from stripe.error import InvalidRequestError from datetime import", "def user_email() -> str: return f'<EMAIL>-{ci_<EMAIL>' @pytest.fixture def user(user_email) -> UserProtocol: user 
=", "'metadata': {}, 'prices': [{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1,", "== \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture def", "str: products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1) if products: product = products['data'][0] else: product", "= stripe.Product.create(name=unsubscribed_product_name, url=stripe_unsubscribed_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id,", "\"trial_period_days\": None, \"usage_type\": \"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129',", "prices: price = prices.data[0] else: price = stripe.Price.create( unit_amount=129, currency=\"usd\", recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id,", "recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id, ) return price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices =", "{'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name, 'prices': [{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id,", "-> str: return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str: return 'Silver' @pytest.fixture(scope=\"session\") def", "return price['id'] @pytest.fixture def subscription_id(subscription): return subscription['id'] @pytest.fixture def subscription_current_period_end(subscription): return subscription['current_period_end'] @pytest.fixture", "'url': stripe_subscription_product_url, 'metadata': {}, 'prices': [{'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\",", "== \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\",", "stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod: payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id payment_method_for_customer['card']['checks']['cvc_check']", "recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id, ) return price['id'] @pytest.fixture def subscription_id(subscription): return subscription['id'] @pytest.fixture def", "import stripe from stripe.error import InvalidRequestError from datetime import datetime, timedelta import subscriptions", "ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\", action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\") def stripe_subscription_product_url() -> str:", "{ \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\" }, 'type':", "import Optional, Any, List, Dict api_key = '' python_version = sys.version_info ci_string =", "def unsubscribed_product_name() -> str: return 'Silver' 
@pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str: products", "prices.data[0] else: price = stripe.Price.create( unit_amount=129, currency=\"usd\", recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id, ) return price['id']", "-> str: return \"http://localhost/paywall\" @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_url() -> str: return \"http://localhost/second_paywall\" @pytest.fixture(scope=\"session\", autouse=True)", "def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod: payment_method_for_customer['customer']", "stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id,", "[\"card\"] @pytest.fixture def user_email() -> str: return f'<EMAIL>-{ci_<EMAIL>' @pytest.fixture def user(user_email) -> UserProtocol:", "9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None}, 'type':", "= f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\", action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\") def stripe_subscription_product_url() -> str: return", "None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}} ] @pytest.fixture def expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,", "'interval': 'year', 'interval_count': 1, 'trial_period_days': None, 'usage_type': 'licensed'}, 'subscription_info': {'cancel_at': None, 'current_period_end': None,", "def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod: default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return default_payment_method_for_customer", "None, 'metadata': {}, 'product': stripe_subscription_product_id, 'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}] @pytest.fixture", "= stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id, ) return price['id'] @pytest.fixture def subscription_id(subscription):", "\"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata':", "str: return \"http://localhost/paywall\" @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_url() -> str: return \"http://localhost/second_paywall\" @pytest.fixture(scope=\"session\", autouse=True) def", "def subscription_current_period_end(subscription): return subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, 
stripe_price_currency, subscription_id, subscription_current_period_end) -> List:", "None, \"usage_type\": \"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname':", "import subscriptions from subscriptions import UserProtocol, User from typing import Optional, Any, List,", "= pytestconfig.getoption(\"apikey\") @pytest.fixture(scope=\"session\") def checkout_success_url() -> str: return \"http://localhost\" @pytest.fixture(scope=\"session\") def checkout_cancel_url() ->", "request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-user\",", "129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}],", "'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}} ] @pytest.fixture", "str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if prices: price = prices.data[0] else: price", "price = prices.data[0] else: price = stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id, )", "user_with_and_without_customer_id(request, user) -> UserProtocol: if request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test", "\"month\", \"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount':", "customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-customer-id\", \"with-customer-id\"]) def user_with_and_without_customer_id(request,", "user @pytest.fixture def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod:", "'images': [], 'type': 'service', 'name': subscribed_product_name, 'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata':", "str: return \"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() -> str: return 'Gold' @pytest.fixture(scope=\"session\") def stripe_subscription_product_id(stripe_subscription_product_url,", "action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\") def stripe_subscription_product_url() -> str: return \"http://localhost/paywall\" @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_url() ->", "subscription_current_period_end(subscription): return subscription['current_period_end'] @pytest.fixture def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return", "\"interval_count\": 1, \"trial_period_days\": None, \"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129,", "@pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def 
no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param == \"no-user\": return", "def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if prices: price =", "@pytest.fixture def expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency) -> List:", "stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency, subscription_id, subscription_current_period_end) -> List: return [ {'id': stripe_unsubscribed_product_id, 'images':", "return user @pytest.fixture def wrong_customer_id() -> UserProtocol: user = User( 2, \"<EMAIL>\", 'cus_1234567890ABCD'", "-> str: return \"http://localhost\" @pytest.fixture(scope=\"session\") def checkout_cancel_url() -> str: return \"http://localhost/cancel\" @pytest.fixture(scope=\"session\") def", "= sys.version_info ci_string = f'{os.name}-{python_version.major}{python_version.minor}' def pytest_addoption(parser): parser.addoption(\"--apikey\", action=\"store\", default=os.environ.get('STRIPE_TEST_SECRET_KEY')) @pytest.fixture(scope=\"session\") def stripe_subscription_product_url()", "}, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {},", "-> str: products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1) if products: product = products['data'][0] else:", "return \"usd\" @pytest.fixture(scope=\"session\") def unsubscribed_product_name() -> str: return 'Silver' @pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url)", "'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}], 'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at':", "None return user @pytest.fixture def wrong_customer_id() -> UserProtocol: user = User( 2, \"<EMAIL>\",", "return user @pytest.fixture def user_with_customer_id(user, user_email) -> UserProtocol: customers = stripe.Customer.list(email=user_email) for customer", "'subscription_info': {'sub_id': subscription_id, 'cancel_at': None, 'current_period_end': subscription_current_period_end}}] @pytest.fixture def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, stripe_price_currency) ->", "subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription: return subscriptions.create_subscription(user_with_customer_id, stripe_price_id) @pytest.fixture def non_existing_payment_method_id() -> str:", "subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return", "@pytest.fixture(scope=\"session\") def stripe_unsubscribed_product_url() -> str: return \"http://localhost/second_paywall\" @pytest.fixture(scope=\"session\", autouse=True) def setup_stripe(pytestconfig): stripe.api_key =", "return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", 
\"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]: if request.param ==", "expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id, stripe_subscription_product_url, stripe_unsubscribed_product_url, stripe_price_currency) -> List: return [", "if products: product = products['data'][0] else: product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\")", "user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"]) def no_user_and_user_with_and_without_customer_id(request,", "None, 'current_period_end': None, 'sub_id': None}, 'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id,", "None, 'sub_id': None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info': {'cancel_at':", "= user_with_customer_id.stripe_customer_id default_payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return default_payment_method_for_customer @pytest.fixture def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) ->", "subscriptions.tests.create_payment_method_for_customer(user_with_customer_id) @pytest.fixture def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod: return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id) @pytest.fixture def payment_method_saved(user_with_customer_id, payment_method_for_customer) ->", "= prices.data[0] else: price = stripe.Price.create( unit_amount=129, currency=\"usd\", recurring={\"interval\": \"month\"}, product=stripe_subscription_product_id, ) return", "= stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\")", "def user(user_email) -> UserProtocol: user = User(user_id=1, email=user_email) yield user if user.stripe_customer_id: try:", "unsubscribed_product_name, 'prices': [{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {}, 'nickname': None, 'recurring': {'aggregate_usage': None,", "'unit_label': None, 'url': stripe_unsubscribed_product_url}, {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name': subscribed_product_name, 'shippable':", "return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str: prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1) if", "{'aggregate_usage': None, 'interval': 'year', 'interval_count': 1, 'trial_period_days': None, 'usage_type': 'licensed'}, 'subscription_info': {'cancel_at': None,", "List: return [ {'id': stripe_price_id, 'recurring': { \"aggregate_usage\": None, \"interval\": \"month\", \"interval_count\": 1,", "User( 2, \"<EMAIL>\", 'cus_1234567890ABCD' ) return user @pytest.fixture def user_with_customer_id(user, user_email) -> UserProtocol:", "'current_period_end': subscription_current_period_end, 'cancel_at': None}} ] @pytest.fixture def 
expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id, subscribed_product_name, stripe_unsubscribed_product_id, unsubscribed_product_name, stripe_unsubscribed_price_id,", "payment_method_for_customer['card']['checks']['cvc_check'] = \"pass\" return payment_method_for_customer @pytest.fixture def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod: default_payment_method_for_customer['customer'] =", "\"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return user @pytest.fixture(params=[\"no-user\", \"no-customer-id\", \"with-customer-id\"])", "None}, 'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}], 'shippable': None, 'subscription_info': {'cancel_at': None, 'current_period_end':", "'metadata': {}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}], 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at':", "1, \"trial_period_days\": None, \"usage_type\": \"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal':", ") return price['id'] @pytest.fixture(scope=\"session\") def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str: prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1)", "non_existing_subscription_id() -> str: return \"sub_ABCDEFGH123456\" @pytest.fixture(scope=\"session\") def subscribed_product_name() -> str: return 'Gold' @pytest.fixture(scope=\"session\")", "None elif request.param == \"no-customer-id\": return user subscriptions.create_customer(user, description=\"stripe-subscriptions test runner user\") return", "None, 'metadata': {}, 'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}], 'subscription_info': {'sub_id': None, 'current_period_end':", "-> Optional[UserProtocol]: if not request.param: return None return user @pytest.fixture def wrong_customer_id() ->", "-> UserProtocol: customers = stripe.Customer.list(email=user_email) for customer in customers: stripe.Customer.delete(customer) subscriptions.create_customer(user, description=\"stripe-subscriptions test", "products['data'][0] else: product = stripe.Product.create(name=unsubscribed_product_name, url=stripe_unsubscribed_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_id(stripe_subscription_product_id) -> str:", "stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url) return product['id'] @pytest.fixture(scope=\"session\") def stripe_price_currency() -> str: return \"usd\" @pytest.fixture(scope=\"session\") def", "prices: price = prices.data[0] else: price = stripe.Price.create( unit_amount=9999, currency=\"usd\", recurring={\"interval\": \"year\"}, product=stripe_unsubscribed_product_id,", "@pytest.fixture def user_with_customer_id(user, user_email) -> UserProtocol: customers = stripe.Customer.list(email=user_email) for customer in customers:", "\"licensed\", }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata':", "None, \"usage_type\": \"licensed\" }, 'type': 'recurring', 'currency': stripe_price_currency, 'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname':", "{'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 
# primal100/stripe-subscriptions: pytest fixtures for exercising the library against the Stripe test API.
import os
import sys

import pytest
import stripe
from stripe.error import InvalidRequestError
from datetime import datetime, timedelta

import subscriptions
from subscriptions import UserProtocol, User
from typing import Optional, Any, List, Dict

api_key = ''

python_version = sys.version_info
ci_string = f'{os.name}-{python_version.major}{python_version.minor}'

def pytest_addoption(parser):
    parser.addoption("--apikey", action="store", default=os.environ.get('STRIPE_TEST_SECRET_KEY'))

@pytest.fixture(scope="session")
def stripe_subscription_product_url() -> str:
    return "http://localhost/paywall"

@pytest.fixture(scope="session")
def stripe_unsubscribed_product_url() -> str:
    return "http://localhost/second_paywall"

@pytest.fixture(scope="session", autouse=True)
def setup_stripe(pytestconfig):
    stripe.api_key = pytestconfig.getoption("apikey")

@pytest.fixture(scope="session")
def checkout_success_url() -> str:
    return "http://localhost"

@pytest.fixture(scope="session")
def checkout_cancel_url() -> str:
    return "http://localhost/cancel"

@pytest.fixture(scope="session")
def payment_method_types() -> List[str]:
    return ["card"]

@pytest.fixture
def user_email() -> str:
    return f'<EMAIL>-{ci_<EMAIL>'

@pytest.fixture
def user(user_email) -> UserProtocol:
    user = User(user_id=1, email=user_email)
    yield user
    if user.stripe_customer_id:
        try:
            subscriptions.delete_customer(user)
        except InvalidRequestError:
            pass

@pytest.fixture(params=[None, "user"])
def none_or_user(request, user) -> Optional[UserProtocol]:
    if not request.param:
        return None
    return user

@pytest.fixture
def wrong_customer_id() -> UserProtocol:
    user = User(
        2,
        "<EMAIL>",
        'cus_1234567890ABCD'
    )
    return user

@pytest.fixture
def user_with_customer_id(user, user_email) -> UserProtocol:
    customers = stripe.Customer.list(email=user_email)
    for customer in customers:
        stripe.Customer.delete(customer)
    subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
    return user

@pytest.fixture(params=["no-customer-id", "with-customer-id"])
def user_with_and_without_customer_id(request, user) -> UserProtocol:
    if request.param == "no-customer-id":
        return user
    subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
    return user

@pytest.fixture(params=["no-user", "no-customer-id", "with-customer-id"])
def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]:
    if request.param == "no-user":
        return None
    elif request.param == "no-customer-id":
        return user
    subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
    return user

@pytest.fixture
def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod:
    return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id)

@pytest.fixture
def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod:
    return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id)

@pytest.fixture
def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod:
    payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id
    payment_method_for_customer['card']['checks']['cvc_check'] = "pass"
    return payment_method_for_customer

@pytest.fixture
def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod:
    default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id
    default_payment_method_for_customer['card']['checks']['cvc_check'] = "pass"
    return default_payment_method_for_customer

@pytest.fixture
def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription:
    return subscriptions.create_subscription(user_with_customer_id, stripe_price_id)

@pytest.fixture
def non_existing_payment_method_id() -> str:
    return "pm_ABCDEFGH123456"

@pytest.fixture
def non_existing_subscription_id() -> str:
    return "sub_ABCDEFGH123456"

@pytest.fixture(scope="session")
def subscribed_product_name() -> str:
    return 'Gold'

@pytest.fixture(scope="session")
def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str:
    products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1)
    if products:
        product = products['data'][0]
    else:
        product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url)
    return product['id']

@pytest.fixture(scope="session")
def stripe_price_currency() -> str:
    return "usd"

@pytest.fixture(scope="session")
def unsubscribed_product_name() -> str:
    return 'Silver'

@pytest.fixture(scope="session")
def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str:
    products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1)
    if products:
        product = products['data'][0]
    else:
        product = stripe.Product.create(name=unsubscribed_product_name, url=stripe_unsubscribed_product_url)
    return product['id']

@pytest.fixture(scope="session")
def stripe_price_id(stripe_subscription_product_id) -> str:
    prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1)
    if prices:
        price = prices.data[0]
    else:
        price = stripe.Price.create(
            unit_amount=129,
            currency="usd",
            recurring={"interval": "month"},
            product=stripe_subscription_product_id,
        )
    return price['id']

@pytest.fixture(scope="session")
def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str:
    prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1)
    if prices:
        price = prices.data[0]
    else:
        price = stripe.Price.create(
            unit_amount=9999,
            currency="usd",
            recurring={"interval": "year"},
            product=stripe_unsubscribed_product_id,
        )
    return price['id']

@pytest.fixture
def subscription_id(subscription):
    return subscription['id']

@pytest.fixture
def subscription_current_period_end(subscription):
    return subscription['current_period_end']

@pytest.fixture
def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency,
                                 subscription_id, subscription_current_period_end) -> List:
    return [
        {'id': stripe_price_id,
         'recurring': {"aggregate_usage": None, "interval": "month", "interval_count": 1,
                       "trial_period_days": None, "usage_type": "licensed"},
         'type': 'recurring', 'currency': stripe_price_currency,
         'unit_amount': 129, 'unit_amount_decimal': '129',
         'nickname': None, 'metadata': {}, 'product': stripe_subscription_product_id,
         'subscription_info': {'sub_id': subscription_id, 'cancel_at': None,
                               'current_period_end': subscription_current_period_end}}]

@pytest.fixture
def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,
                                              stripe_price_currency) -> List:
    return [
        {'id': stripe_price_id,
         'recurring': {"aggregate_usage": None, "interval": "month", "interval_count": 1,
                       "trial_period_days": None, "usage_type": "licensed"},
         'type': 'recurring', 'currency': stripe_price_currency,
         'unit_amount': 129, 'unit_amount_decimal': '129',
         'nickname': None, 'metadata': {}, 'product': stripe_subscription_product_id,
         'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}]

@pytest.fixture
def expected_subscription_products_and_prices(stripe_subscription_product_id, stripe_price_id, subscribed_product_name,
                                              stripe_unsubscribed_product_id, unsubscribed_product_name,
                                              stripe_unsubscribed_price_id, stripe_subscription_product_url,
                                              stripe_unsubscribed_product_url, stripe_price_currency,
                                              subscription_id, subscription_current_period_end) -> List:
    return [
        {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name,
         'prices': [{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {},
                     'nickname': None,
                     'recurring': {'aggregate_usage': None, 'interval': 'year', 'interval_count': 1,
                                   'trial_period_days': None, 'usage_type': 'licensed'},
                     'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
                     'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}],
         'shippable': None,
         'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
         'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url},
        {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name': subscribed_product_name,
         'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {},
         'prices': [{'id': stripe_price_id,
                     'recurring': {"aggregate_usage": None, "interval": "month", "interval_count": 1,
                                   "trial_period_days": None, "usage_type": "licensed"},
                     'type': 'recurring', 'currency': stripe_price_currency,
                     'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {},
                     'subscription_info': {'sub_id': subscription_id,
                                           'current_period_end': subscription_current_period_end,
                                           'cancel_at': None}}],
         'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end,
                               'cancel_at': None}}
    ]

@pytest.fixture
def expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,
                                                           subscribed_product_name, stripe_unsubscribed_product_id,
                                                           unsubscribed_product_name, stripe_unsubscribed_price_id,
                                                           stripe_subscription_product_url,
                                                           stripe_unsubscribed_product_url,
                                                           stripe_price_currency) -> List:
    return [
        {'id': stripe_unsubscribed_product_id, 'images': [], 'metadata': {}, 'name': unsubscribed_product_name,
         'prices': [{'currency': stripe_price_currency, 'id': stripe_unsubscribed_price_id, 'metadata': {},
                     'nickname': None,
                     'recurring': {'aggregate_usage': None, 'interval': 'year', 'interval_count': 1,
                                   'trial_period_days': None, 'usage_type': 'licensed'},
                     'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
                     'type': 'recurring', 'unit_amount': 9999, 'unit_amount_decimal': '9999'}],
         'shippable': None,
         'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
         'type': 'service', 'unit_label': None, 'url': stripe_unsubscribed_product_url},
        {'id': stripe_subscription_product_id, 'images': [], 'type': 'service', 'name': subscribed_product_name,
         'shippable': None, 'unit_label': None, 'url': stripe_subscription_product_url, 'metadata': {},
         'prices': [{'id': stripe_price_id,
                     'recurring': {"aggregate_usage": None, "interval": "month", "interval_count": 1,
                                   "trial_period_days": None, "usage_type": "licensed"},
                     'type': 'recurring', 'currency': stripe_price_currency,
                     'unit_amount': 129, 'unit_amount_decimal': '129', 'nickname': None, 'metadata': {},
                     'subscription_info': {'sub_id': None, 'current_period_end': None, 'cancel_at': None}}],
         'subscription_info': {'sub_id': None, 'current_period_end': None, 'cancel_at': None}}
    ]
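The fixtures above only prepare Stripe state; a minimal, hypothetical sketch of a test that consumes the subscription and stripe_price_id fixtures might look like the following. It is illustrative only, relies on nothing beyond the standard shape of a Stripe Subscription object, and the test name is an assumption rather than code from the repository:

def test_subscription_matches_price(subscription, stripe_price_id):
    # `subscription` is created by subscriptions.create_subscription() in the fixture above;
    # a Stripe Subscription exposes its items and status via dict-style access.
    assert subscription['items']['data'][0]['price']['id'] == stripe_price_id
    assert subscription['status'] in ('active', 'incomplete')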
[ "TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts", "for voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku:", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS", "- {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213) await", "**kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def", "def _do_say(self, text): # TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text}", "import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) #", "加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213) await client.start() if __name__ ==", "self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message)", "r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage):", "async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def", "None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init() #", "pyttsx3 import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs):", "self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice', voice) # voices", "= await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async", "async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self,", "text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None,", "= pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice', voice) # voices =", "**kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts =", "translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text)", "async def _do_say(self, text): # TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text)", "_say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存? 
translated_text = await", "*args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts", "翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def start(self): self._loop.run_in_executor(None,", "print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self,", "text): # TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}')", "# TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') #", "print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213)", "# -*- coding: utf-8 -*- import asyncio import pyttsx3 import translate import blivedm.blivedm", "super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None", "self._tts.getProperty('voice') # print('cur voice', voice) # voices = self._tts.getProperty('voices') # for voice in", "_on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text):", "message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO", "voice = self._tts.getProperty('voice') # print('cur voice', voice) # voices = self._tts.getProperty('voices') # for", "= self._tts.getProperty('voices') # for voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async", "# TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self):", "def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存? translated_text =", "coding: utf-8 -*- import asyncio import pyttsx3 import translate import blivedm.blivedm as blivedm", "return super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur", "# 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def start(self):", "常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列", "TODO 常用的加缓存? translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO", "self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main(): client", "await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def", "self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存? 
translated_text = await self._loop.run_in_executor(None, self._translator.translate,", "to_lang='ja') # TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def", "voice) # voices = self._tts.getProperty('voices') # for voice in voices: # print(voice) self._tts.setProperty('voice',", "# print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def", "# for voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self,", "utf-8 -*- import asyncio import pyttsx3 import translate import blivedm.blivedm as blivedm class", "import pyttsx3 import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args,", "# voice = self._tts.getProperty('voice') # print('cur voice', voice) # voices = self._tts.getProperty('voices') #", "danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text))", "# TODO 加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213) await client.start() if", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja') #", "self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts =", "start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice =", "print('cur voice', voice) # voices = self._tts.getProperty('voices') # for voice in voices: #", "translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start()", "class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh',", "blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译", "# voices = self._tts.getProperty('voices') # for voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0')", "{translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213) await client.start()", "= None def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init()", "BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator = translate.Translator(from_lang='zh', to_lang='ja')", "as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator", "_do_say(self, text): # TODO 常用的加缓存? 
translated_text = await self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} -", "blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存?", "translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)", "blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # 翻译 self._translator =", "def start(self): self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice", "in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg)", "_on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): #", "self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def", "voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async", "self._loop.run_in_executor(None, self._translator.translate, text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main():", "_tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice', voice) #", "text) print(f'{text} - {translated_text}') # TODO 加入队列 self._tts.say(translated_text) async def main(): client =", "asyncio import pyttsx3 import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self,", "-*- import asyncio import pyttsx3 import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient):", "super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice',", "def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self,", "<gh_stars>1-10 # -*- coding: utf-8 -*- import asyncio import pyttsx3 import translate import", "= self._tts.getProperty('voice') # print('cur voice', voice) # voices = self._tts.getProperty('voices') # for voice", "self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message:", "self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') #", "-*- coding: utf-8 -*- import asyncio import pyttsx3 import translate import blivedm.blivedm as", "self._tts.getProperty('voices') # for voice in voices: # print(voice) self._tts.setProperty('voice', 
r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def", "self._loop.run_in_executor(None, self._tts_loop) return super().start() def _tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice')", "import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def __init__(self, *args, **kwargs): super().__init__(*args,", "blivedm.DanmakuMessage): self._say(danmaku.msg) async def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async", "TODO 加入队列 self._tts.say(translated_text) async def main(): client = BLiveTts(213) await client.start() if __name__", "pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice', voice) # voices = self._tts.getProperty('voices')", "self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text): # TODO 常用的加缓存? translated_text", "def _on_super_chat(self, message: blivedm.SuperChatMessage): self._say(message.message) def _say(self, text): self._loop.create_task(self._do_say(text)) async def _do_say(self, text):", "voice', voice) # voices = self._tts.getProperty('voices') # for voice in voices: # print(voice)", "def _tts_loop(self): self._tts = pyttsx3.init() # voice = self._tts.getProperty('voice') # print('cur voice', voice)", "import asyncio import pyttsx3 import translate import blivedm.blivedm as blivedm class BLiveTts(blivedm.BLiveClient): def", "async def main(): client = BLiveTts(213) await client.start() if __name__ == '__main__': asyncio.get_event_loop().run_until_complete(main())", "voices = self._tts.getProperty('voices') # for voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop()", "self._tts.say(translated_text) async def main(): client = BLiveTts(213) await client.start() if __name__ == '__main__':", "voice in voices: # print(voice) self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\TTS_MS_JA-JP_HARUKA_11.0') self._tts.startLoop() async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):", "= translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop) return", "# print('cur voice', voice) # voices = self._tts.getProperty('voices') # for voice in voices:", "self._translator = translate.Translator(from_lang='zh', to_lang='ja') # TTS self._tts = None def start(self): self._loop.run_in_executor(None, self._tts_loop)" ]
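The TODO in _do_say about queueing can be handled without touching the blivedm side: hand translated text to a worker thread through a thread-safe queue and let that thread drive pyttsx3. A minimal sketch of one such approach, assuming a separate helper class (it is not part of the original script and uses runAndWait() instead of the original startLoop() style):

import queue
import threading

import pyttsx3


class QueuedTts:
    """Runs pyttsx3 on its own thread and speaks texts in the order they are queued."""

    def __init__(self):
        self._queue = queue.Queue()
        threading.Thread(target=self._worker, daemon=True).start()

    def say(self, text):
        # Called from the asyncio side; only enqueues, never blocks on speech.
        self._queue.put(text)

    def _worker(self):
        engine = pyttsx3.init()
        while True:
            text = self._queue.get()
            engine.say(text)
            engine.runAndWait()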
[ "import traceback from scapy.sendrecv import sendp, sniff import info import tests def capture(interface,", "fn = tests.sender_default else: fn = tests.check_nothing try: status = fn(testname, packets) except", "def active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname)", "fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\")", "packets = [] for i in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname):", "passive(host, testname): iface = info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname] if", "elif host == test.host_s: fn = tests.sender_default else: fn = tests.check_nothing try: status", "in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface = info.get(\"host_if_name\", host) packets", "def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically", "test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\",", "import os import shutil import sys import traceback from scapy.sendrecv import sendp, sniff", "else: fn = tests.check_nothing try: status = fn(testname, packets) except AssertionError as e:", "fn = tests.check_nothing try: status = fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__)", "output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for i in", "info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname] if host == test.host_r: fn", "iface = info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname] if host ==", "test.passive_fn elif host == test.host_s: fn = tests.sender_default else: fn = tests.check_nothing try:", "determine this, but this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^", "from scapy.sendrecv import sendp, sniff import info import tests def capture(interface, output_file=\"test\"): cap", "= capture(iface) test = tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn elif", "as e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets,", "== test.host_r: fn = test.passive_fn elif host == test.host_s: fn = tests.sender_default else:", "*could* determine this, but this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive", "= tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def main():", "scapy.sendrecv import sendp, sniff import info import tests def capture(interface, output_file=\"test\"): cap =", "(status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface)", "packets: sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host)", "for packet in packets: sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname] 
iface", "info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\",", "sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for i in range(len(cap)): packets.append(cap[i]) return", "try: status = fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status = False", "parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname) if __name__", "cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for i in range(len(cap)):", "packets def passive(host, testname): iface = info.get(\"host_if_name\", host) packets = capture(iface) test =", "= test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\")", "import sendp, sniff import info import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface,", "e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface):", "print(\"FAIL\") def send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface) def active(host, testname):", "type=str) # Technically we *could* determine this, but this is simpler parser.add_argument(\"--host\", type=int)", "= test.passive_fn elif host == test.host_s: fn = tests.sender_default else: fn = tests.check_nothing", "range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface = info.get(\"host_if_name\", host) packets =", "= tests.check_nothing try: status = fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status", "action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this, but this is simpler", "packets = capture(iface) test = tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn", "= tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn elif host == test.host_s:", "= argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine", "send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str)", "active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets,", "[] for i in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface =", "packet in packets: sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname] iface =", "= info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname] if host == test.host_r:", "test.host_r: fn = test.passive_fn elif host == test.host_s: fn = tests.sender_default else: fn", "= sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for i in range(len(cap)): packets.append(cap[i])", "def send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface) def active(host, testname): test", "host == 
test.host_s: fn = tests.sender_default else: fn = tests.check_nothing try: status =", "type=int) args = parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host,", "python3 import argparse import os import shutil import sys import traceback from scapy.sendrecv", "traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for", "parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this, but", "iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) #", "def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we", "i in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface = info.get(\"host_if_name\", host)", "packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\",", "host) packets = capture(iface) test = tests.TESTS[testname] if host == test.host_r: fn =", "send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface) def active(host, testname): test =", "shutil import sys import traceback from scapy.sendrecv import sendp, sniff import info import", "iface=iface) def active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets =", "simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname)", "if host == test.host_r: fn = test.passive_fn elif host == test.host_s: fn =", "tests.sender_default else: fn = tests.check_nothing try: status = fn(testname, packets) except AssertionError as", "tests.check_nothing try: status = fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status =", "sys import traceback from scapy.sendrecv import sendp, sniff import info import tests def", "info import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets", "sendp, sniff import info import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT)", "# Technically we *could* determine this, but this is simpler parser.add_argument(\"--host\", type=int) args", "is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host,", "= [] for i in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface", "for i in range(len(cap)): packets.append(cap[i]) return packets def passive(host, testname): iface = info.get(\"host_if_name\",", "assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname) if __name__ ==", "test.host_s: fn = 
tests.sender_default else: fn = tests.check_nothing try: status = fn(testname, packets)", "def passive(host, testname): iface = info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname]", "timeout=info.TIMEOUT) # FIXME packets = [] for i in range(len(cap)): packets.append(cap[i]) return packets", "Technically we *could* determine this, but this is simpler parser.add_argument(\"--host\", type=int) args =", "import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets =", "os import shutil import sys import traceback from scapy.sendrecv import sendp, sniff import", "except AssertionError as e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else: print(\"FAIL\")", "<reponame>Stefania12/Router<gh_stars>0 #!/usr/bin/env python3 import argparse import os import shutil import sys import traceback", "sniff import info import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) #", "iface): for packet in packets: sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname]", "print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface) def", "iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser =", "test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def", "parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this, but this is simpler parser.add_argument(\"--host\",", "import shutil import sys import traceback from scapy.sendrecv import sendp, sniff import info", "but this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active) if", "this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active) if args.passive:", "^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname) if __name__ == \"__main__\":", "host == test.host_r: fn = test.passive_fn elif host == test.host_s: fn = tests.sender_default", "testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface)", "= parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname) if", "args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname) if __name__ == \"__main__\": main()", "status = False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet", "False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet in packets:", "argparse import os import shutil import sys import traceback from scapy.sendrecv import sendp,", "== test.host_s: fn = tests.sender_default else: fn = tests.check_nothing try: status = fn(testname,", "else: print(\"FAIL\") def send_packets(packets, iface): for packet in packets: sendp(packet, iface=iface) def active(host,", "capture(iface) test = tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn elif host", "AssertionError as e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else: print(\"FAIL\") 
def", "host) packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\")", "= False if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet in", "action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this, but this", "args = parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else: active(args.host, args.testname)", "we *could* determine this, but this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args()", "in packets: sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\",", "tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = []", "= tests.sender_default else: fn = tests.check_nothing try: status = fn(testname, packets) except AssertionError", "status = fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status = False if", "if (status): print(\"PASS\") else: print(\"FAIL\") def send_packets(packets, iface): for packet in packets: sendp(packet,", "#!/usr/bin/env python3 import argparse import os import shutil import sys import traceback from", "= fn(testname, packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status = False if (status):", "packets.append(cap[i]) return packets def passive(host, testname): iface = info.get(\"host_if_name\", host) packets = capture(iface)", "import info import tests def capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME", "= info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser = argparse.ArgumentParser()", "import sys import traceback from scapy.sendrecv import sendp, sniff import info import tests", "testname): iface = info.get(\"host_if_name\", host) packets = capture(iface) test = tests.TESTS[testname] if host", "# FIXME packets = [] for i in range(len(cap)): packets.append(cap[i]) return packets def", "parser = argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could*", "fn = test.passive_fn elif host == test.host_s: fn = tests.sender_default else: fn =", "tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets = test.active_fn(testname) send_packets(packets, iface) def main(): parser", "argparse.ArgumentParser() parser.add_argument(\"--passive\", action=\"store_true\") parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this,", "return packets def passive(host, testname): iface = info.get(\"host_if_name\", host) packets = capture(iface) test", "capture(interface, output_file=\"test\"): cap = sniff(iface=interface, timeout=info.TIMEOUT) # FIXME packets = [] for i", "tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn elif host == test.host_s: fn", "parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active) if args.passive: passive(args.host, args.testname) else:", "traceback from scapy.sendrecv import sendp, sniff 
import info import tests def capture(interface, output_file=\"test\"):", "import argparse import os import shutil import sys import traceback from scapy.sendrecv import", "test = tests.TESTS[testname] if host == test.host_r: fn = test.passive_fn elif host ==", "FIXME packets = [] for i in range(len(cap)): packets.append(cap[i]) return packets def passive(host,", "this, but this is simpler parser.add_argument(\"--host\", type=int) args = parser.parse_args() assert(args.passive ^ args.active)", "parser.add_argument(\"--active\", action=\"store_true\") parser.add_argument(\"--testname\", type=str) # Technically we *could* determine this, but this is", "sendp(packet, iface=iface) def active(host, testname): test = tests.TESTS[testname] iface = info.get(\"host_if_name\", host) packets", "packets) except AssertionError as e: traceback.print_tb(e.__traceback__) status = False if (status): print(\"PASS\") else:" ]
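# The harness above assumes an `info` module (TIMEOUT, get("host_if_name", host))
# and a `tests` module exposing TESTS plus sender_default/check_nothing. A
# minimal sketch of that `tests` shape with an illustrative ICMP test; the
# names and addresses below are assumptions, not the project's real test
# definitions:
from collections import namedtuple

from scapy.layers.inet import ICMP, IP
from scapy.layers.l2 import Ether

Test = namedtuple("Test", ["host_s", "host_r", "active_fn", "passive_fn"])


def icmp_active(testname):
    # Sender side: a single ICMP echo request toward the receiving host.
    return [Ether() / IP(dst="192.168.1.2") / ICMP()]


def icmp_passive(testname, packets):
    # Receiver side: pass if at least one ICMP packet was captured.
    return any(ICMP in p for p in packets)


def sender_default(testname, packets):
    # The sender normally has nothing to verify.
    return True


def check_nothing(testname, packets):
    # Hosts not involved in the test should see no traffic.
    return len(packets) == 0


TESTS = {
    "icmp_basic": Test(host_s=0, host_r=1,
                       active_fn=icmp_active, passive_fn=icmp_passive),
}
# One --active run on host_s is then paired with --passive runs on the other
# hosts, e.g.: python3 <script> --active --testname icmp_basic --host 0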
[ "to maintain a reference so # they are not released and garbage collected.", "Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name']", "for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import", "event to save the document event_data = { 'file_name': file_name, 'file_path': file_path }", "ui = app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 =", "new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep", "used to maintain a reference so # they are not released and garbage", "'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({", "from ...lib import fusion360utils as futil app = adsk.core.Application.get() ui = app.userInterface NAME1", "import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name] = new_document", "# data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document in", "# Store the result of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link':", "# they are not released and garbage collected. local_handlers = [] my_data_handlers =", "this Document event_data = { 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info)", "document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile):", "don't disrupt the main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import", "new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close a specific", "the dataFile and process it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document:", "file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360", "[] my_data_handlers = [] my_custom_handlers = [] # Executed when add-in is run.", "Executed when add-in is stopped. Remove events. def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers", "Event\" NAME3 = \"Custom Save Event\" NAME4 = \"Custom Close Event\" # Local", "NAME4 = \"Custom Close Event\" # Local list of event handlers used to", "Remove events. 
def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in", "= import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name] =", "file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific", "config.imported_filenames.remove(data_file.name) # Fire close event for this Document event_data = { 'file_name': data_file.name,", "futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is stopped. Remove events. def stop():", "event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import into a new document", "sure we are processing a file imported from this script if data_file.name in", "= new_document config.imported_filenames.append(file_name) # Fire event to save the document event_data = {", "the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames =", "for: {args.file.name}') # Get the dataFile and process it # data_file: adsk.core.DataFile =", "from the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path", "config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close,", "document event_data = { 'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save,", "= data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store the result of", "as futil app = adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2 =", "{data_file.name}: {public_link}\") # Store the result of this file config.results.append({ 'Name': data_file.name, 'URN':", "futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get the dataFile and process it", "'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for", "handler for when data files are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers", "be executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler", "it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document", "import config from ...lib import fusion360utils as futil app = adsk.core.Application.get() ui =", "Create custom events so we don't disrupt the main application loop. 
def start():", "Make sure we are processing a file imported from this script if data_file.name", "app.dataFileComplete.remove(data_handler) # Import a document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data =", "Import Event\" NAME3 = \"Custom Save Event\" NAME4 = \"Custom Close Event\" #", "data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for this Document event_data", "= json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents have been processed", "{file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to be executed", "True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... # After all files are", "reference so # they are not released and garbage collected. local_handlers = []", "not config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... #", "After all files are processed write the results def write_results(): futil.log(f\"Writing CSV\") with", "def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document =", "with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file,", "file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close", "import adsk.core from ... import config from ...lib import fusion360utils as futil app", "handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in", "in config.imported_filenames: try: # Create the public link for the data file public_link", "my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data", "adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items(): if", "my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event handler", "mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader()", "app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the list", "processed: {data_file.name}\") ... # After all files are processed write the results def", "when add-in is run. 
Create custom events so we don't disrupt the main", "and process it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for", "# document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing a file", "documents have been processed finalize results if len(config.imported_filenames) == 0: if not config.run_finished:", "my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from", "write the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames", "app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 = \"Custom Save", "# Import a document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo)", "the public link for the data file public_link = data_file.publicLink futil.log(f\"**********Created public link", "custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import })", "process it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for file_name,", "csv_file: fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row", "config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close a specific document def", "= { 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') #", "data_file.name in config.imported_filenames: try: # Create the public link for the data file", "def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler'])", "def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document =", "def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing:", "process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing a file imported from this", "events. 
def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers:", "Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document", "file public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store the", "for this Document event_data = { 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close,", "{file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close a", "all files are processed write the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name,", "adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3", "are not released and garbage collected. local_handlers = [] my_data_handlers = [] my_custom_handlers", "local_handlers = [] my_data_handlers = [] my_custom_handlers = [] # Executed when add-in", "public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for this Document event_data = {", "list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path']", "is run. Create custom events so we don't disrupt the main application loop.", "for when data files are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added:", "imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to save the document", "def process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing a file imported from", "}) # Create the event handler for when data files are complete. my_data_handlers.append(", "Store the result of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link", "'tag') # Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name", "name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event", "have been processed finalize results if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished", "'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 = \"Custom Save Event\" NAME4 =", "event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') #", "futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store the result of this file", "# Fire close event for this Document event_data = { 'file_name': data_file.name, }", "not released and garbage collected. 
local_handlers = [] my_data_handlers = [] my_custom_handlers =", "specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}')", "custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3)", "are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}')", "name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save)", "custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save })", "fusion360utils as futil app = adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2", "Fire event to save the document event_data = { 'file_name': file_name, 'file_path': file_path", "config.target_data_folder, 'Imported from script', 'tag') # Close a specific document def handle_close(args: adsk.core.CustomEventArgs):", "0: if not config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\")", "to be executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event", "Document event_data = { 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except:", "Create the event handler for when data files are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete,", "to save the document event_data = { 'file_name': file_name, 'file_path': file_path } additional_info", "'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({", "# Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name =", "if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we are", "add-in is stopped. Remove events. def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}')", "the main application loop. 
def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import,", "event handlers used to maintain a reference so # they are not released", "handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get the dataFile and", "if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure", "\"Custom Save Event\" NAME4 = \"Custom Close Event\" # Local list of event", "= config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to be executed by the", "for {data_file.name}: {public_link}\") # Store the result of this file config.results.append({ 'Name': data_file.name,", "they are not released and garbage collected. local_handlers = [] my_data_handlers = []", "files are processed write the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w')", "# Local list of event handlers used to maintain a reference so #", "NAME3 = \"Custom Save Event\" NAME4 = \"Custom Close Event\" # Local list", "file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def", "link for the data file public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}:", "= event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag')", "loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({", "= futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close)", "the Fusion 360 import into a new document import_manager = app.importManager step_options =", "this script if data_file.name in config.imported_filenames: try: # Create the public link for", "file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import into a", "custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import,", "Execute the Fusion 360 import into a new document import_manager = app.importManager step_options", "config.run_finished = True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... # After all", "disrupt the main application loop. 
def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import =", "processed write the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file:", "'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all", "'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save =", "= app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler':", "'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close,", "} additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents have", "custom events so we don't disrupt the main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import)", "}) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close,", "a file imported from this script if data_file.name in config.imported_filenames: try: # Create", "finalize results if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished = True write_results()", "'Imported from script', 'tag') # Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data", "when data files are complete. 
my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}')", "futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close", "= import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) #", "data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for this", "app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo)", "'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event handler for when data", "results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name',", "stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler)", "close event for this Document event_data = { 'file_name': data_file.name, } additional_info =", "def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id':", "event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute", "app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close", "new_document: new_document.close(False) # Function to be executed by the dataFileComplete event. def handle_data_file_complete(args:", "time import adsk.core from ... import config from ...lib import fusion360utils as futil", "= { 'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) #", "futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers:", "garbage collected. local_handlers = [] my_data_handlers = [] my_custom_handlers = [] # Executed", "in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): #", "maintain a reference so # they are not released and garbage collected. 
local_handlers", "# Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name =", "= futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) #", "# Execute the Fusion 360 import into a new document import_manager = app.importManager", "handler for: {args.file.name}') # Get the dataFile and process it # data_file: adsk.core.DataFile", "If all documents have been processed finalize results if len(config.imported_filenames) == 0: if", "= [] my_custom_handlers = [] # Executed when add-in is run. Create custom", "file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) #", "start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import,", "adsk.core from ... import config from ...lib import fusion360utils as futil app =", "# Create the public link for the data file public_link = data_file.publicLink futil.log(f\"**********Created", "data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the list def handle_import(args:", "# If all documents have been processed finalize results if len(config.imported_filenames) == 0:", "{len(my_data_handlers)}') # Executed when add-in is stopped. Remove events. 
def stop(): futil.log(f'**********local_handlers stop:", "custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4)", "document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name']", "list of event handlers used to maintain a reference so # they are", "= json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported", "= json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the", "step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name]", "document: adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) #", "if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished = True write_results() else: #", "custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id':", "\"Custom Close Event\" # Local list of event handlers used to maintain a", "into a new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options)", "{ 'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save", "config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make", "config from ...lib import fusion360utils as futil app = adsk.core.Application.get() ui = app.userInterface", "= app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler':", "\"Custom Import Event\" NAME3 = \"Custom Save Event\" NAME4 = \"Custom Close Event\"", "futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create", "stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id'])", "json.loads(args.additionalInfo) file_name = event_data['file_name'] 
file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion", "we don't disrupt the main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import)", "my_custom_handlers = [] # Executed when add-in is run. Create custom events so", "data file public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store", "processed finalize results if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished = True", "process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing a", "main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import,", "adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False)", "app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import", "{args.file.name}') # Get the dataFile and process it # data_file: adsk.core.DataFile = args.file", "additional_info) # Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name", "specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}')", "= args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid:", "event handler for when data files are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1))", "Function to be executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete", "link for {data_file.name}: {public_link}\") # Store the result of this file config.results.append({ 'Name':", "process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile)", "= event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import", "results if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished = True write_results() else:", "else: # futil.log(f\"**********Already processed: {data_file.name}\") ... 
# After all files are processed write", "data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents", "file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script',", "app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event':", "the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path =", "futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for", "= \"Custom Save Event\" NAME4 = \"Custom Close Event\" # Local list of", "file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file:", "} additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def handle_save(args:", "handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}')", "futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to be", "from this script if data_file.name in config.imported_filenames: try: # Create the public link", "import csv import json import time import adsk.core from ... import config from", "Local list of event handlers used to maintain a reference so # they", "config.imported_filenames: try: # Create the public link for the data file public_link =", "public link for {data_file.name}: {public_link}\") # Store the result of this file config.results.append({", "if data_file.name in config.imported_filenames: try: # Create the public link for the data", "= ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row in config.results:", "a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving:", "handlers used to maintain a reference so # they are not released and", "stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler", "= [] # Executed when add-in is run. Create custom events so we", "[] my_custom_handlers = [] # Executed when add-in is run. Create custom events", "by the dataFileComplete event. 
def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}')", "Import a document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name", "custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event handler for when data files", "{data_file.name}\") ... # After all files are processed write the results def write_results():", "config.imported_filenames.append(file_name) # Fire event to save the document event_data = { 'file_name': file_name,", "import into a new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document =", "my_data_handlers = [] my_custom_handlers = [] # Executed when add-in is run. Create", "Event\" NAME4 = \"Custom Close Event\" # Local list of event handlers used", "= \"Custom Import Event\" NAME3 = \"Custom Save Event\" NAME4 = \"Custom Close", "futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when", "the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') #", "['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row in config.results: writer.writerow(row)", "adsk.core.DataFile): # Make sure we are processing a file imported from this script", "Executed when add-in is run. Create custom events so we don't disrupt the", "config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... 
# After", "name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close)", "futil.handle_error('process_data_file') # If all documents have been processed finalize results if len(config.imported_filenames) ==", "= app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler':", "file imported from this script if data_file.name in config.imported_filenames: try: # Create the", "for the data file public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\")", "a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing:", "of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) #", "in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the list def handle_import(args: adsk.core.CustomEventArgs):", "config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save,", "csv import json import time import adsk.core from ... import config from ...lib", "of event handlers used to maintain a reference so # they are not", "adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') #", "when add-in is stopped. Remove events. def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop:", "# Make sure we are processing a file imported from this script if", "are processing a file imported from this script if data_file.name in config.imported_filenames: try:", "= True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... # After all files", "# Keep track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event", "files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to save the document event_data", "new_document.close(False) # Function to be executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs):", "write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ... # After all files are processed", "# futil.log(f\"**********Already processed: {data_file.name}\") ... 
# After all files are processed write the", "this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire", "dataFile and process it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document", "'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document", "additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents have been", "local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is", "import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of", "write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link']", "}) config.imported_filenames.remove(data_file.name) # Fire close event for this Document event_data = { 'file_name':", "application_data_file_complete event handler for: {args.file.name}') # Get the dataFile and process it #", "so # they are not released and garbage collected. local_handlers = [] my_data_handlers", "json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from", "event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get the", "imported from this script if data_file.name in config.imported_filenames: try: # Create the public", "{len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is stopped. Remove events. 
def", "data_file: adsk.core.DataFile = args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items():", "app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save", "additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def handle_save(args: adsk.core.CustomEventArgs):", "open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames)", "save the document event_data = { 'file_name': file_name, 'file_path': file_path } additional_info =", "NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 = \"Custom Save Event\"", "config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event handler for when", "adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name,", "def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN',", "a document from the list def handle_import(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name =", "adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get the dataFile and process", "released and garbage collected. local_handlers = [] my_data_handlers = [] my_custom_handlers = []", "args.file # process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid: if", "config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to save the document event_data =", "'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for this Document", "futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is stopped. Remove", "document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document", "'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a", "additional_info) except: futil.handle_error('process_data_file') # If all documents have been processed finalize results if", "[] # Executed when add-in is run. 
Create custom events so we don't", "public link for the data file public_link = data_file.publicLink futil.log(f\"**********Created public link for", "futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer", "all documents have been processed finalize results if len(config.imported_filenames) == 0: if not", "config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to be executed by the dataFileComplete", "# process_data_file(data_file) document: adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete:", "= event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function", "new_document config.imported_filenames.append(file_name) # Fire event to save the document event_data = { 'file_name':", "event_data = { 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file')", "... # After all files are processed write the results def write_results(): futil.log(f\"Writing", "adsk.core.Document for file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False)", "futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save", "new_document = import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name)", "handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the", "NAME2 = \"Custom Import Event\" NAME3 = \"Custom Save Event\" NAME4 = \"Custom", "collected. local_handlers = [] my_data_handlers = [] my_custom_handlers = [] # Executed when", "complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') #", "event_data['file_name'] file_path = event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import into", "run. Create custom events so we don't disrupt the main application loop. def", "the event handler for when data files are complete. 
my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers,", "json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data =", "name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is stopped.", "event_data = { 'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info)", "import_manager.importToNewDocument(step_options) # Keep track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire", "event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to", "import time import adsk.core from ... import config from ...lib import fusion360utils as", "# Get the dataFile and process it # data_file: adsk.core.DataFile = args.file #", "custom_event_handler_close }) # Create the event handler for when data files are complete.", "script', 'tag') # Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo)", "processing a file imported from this script if data_file.name in config.imported_filenames: try: #", "CSV\") with open(config.csv_file_name, mode='w') as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer =", "== 0: if not config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already processed:", "'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close }) # Create the event handler for", "import json import time import adsk.core from ... import config from ...lib import", "Get the dataFile and process it # data_file: adsk.core.DataFile = args.file # process_data_file(data_file)", "is stopped. Remove events. def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for", "json import time import adsk.core from ... import config from ...lib import fusion360utils", "data files are complete. 
my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers", "as csv_file: fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for", "my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed", "app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of imported files", "for file_name, document in config.imported_documents.items(): if document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def", "Close Event\" # Local list of event handlers used to maintain a reference", "import fusion360utils as futil app = adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler'", "dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get", "fieldnames = ['Name', 'URN', 'Link'] writer = csv.DictWriter(csv_file, fieldnames=fieldnames) writer.writeheader() for row in", "futil app = adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom", "Save Event\" NAME4 = \"Custom Close Event\" # Local list of event handlers", "= futil.add_handler(custom_event_import, handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save)", "Keep track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to", "Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name']", "= \"Custom Close Event\" # Local list of event handlers used to maintain", "so we don't disrupt the main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import =", "custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a", "track of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to save", "= [] my_data_handlers = [] my_custom_handlers = [] # Executed when add-in is", "document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track", "executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for:", "from script', 'tag') # Close a specific document def handle_close(args: adsk.core.CustomEventArgs): event_data =", "files are complete. 
my_data_handlers.append( futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers, name=NAME1)) futil.log(f'**********local_handlers added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added:", "'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close =", "= adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\"", "= 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 = \"Custom Save Event\" NAME4", "False) if new_document: new_document.close(False) # Function to be executed by the dataFileComplete event.", "{len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) #", "of imported files config.imported_documents[file_name] = new_document config.imported_filenames.append(file_name) # Fire event to save the", "if not config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already processed: {data_file.name}\") ...", "are processed write the results def write_results(): futil.log(f\"Writing CSV\") with open(config.csv_file_name, mode='w') as", "app = adsk.core.Application.get() ui = app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import", "{file_name}') # Execute the Fusion 360 import into a new document import_manager =", "{public_link}\") # Store the result of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId,", "new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close a specific document def handle_close(args:", "application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import = app.registerCustomEvent(config.custom_event_id_import) custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2)", "json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False)", "if new_document: new_document.close(False) # Function to be executed by the dataFileComplete event. 
def", "document.isValid: if document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we", "= app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) # Keep track of imported", "my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save", "}) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save,", "a new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document = import_manager.importToNewDocument(step_options) #", "Fire close event for this Document event_data = { 'file_name': data_file.name, } additional_info", "added: {len(my_data_handlers)}') # Executed when add-in is stopped. Remove events. def stop(): futil.log(f'**********local_handlers", "# Executed when add-in is run. Create custom events so we don't disrupt", "'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event for this Document event_data =", "len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished = True write_results() else: # futil.log(f\"**********Already", "config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name) # Fire close event", "'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save,", "custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the", "{len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in", "except: futil.handle_error('process_data_file') # If all documents have been processed finalize results if len(config.imported_filenames)", "my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close", "in my_custom_handlers: custom_item['custom_event'].remove(custom_item['custom_event_handler']) app.unregisterCustomEvent(custom_item['custom_event_id']) for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document", "app.unregisterCustomEvent(config.custom_event_id_close) 
custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event':", "futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close", "futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import into a new document import_manager", "new_document = config.imported_documents.pop(file_name, False) if new_document: new_document.close(False) # Function to be executed by", "= json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if new_document:", "handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save, 'custom_event_handler': custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close =", "# Executed when add-in is stopped. Remove events. def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}')", "public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store the result", "= json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_save, additional_info) # Save a specific Document def handle_save(args: adsk.core.CustomEventArgs): event_data", "<reponame>tapnair/ImportAndShare import csv import json import time import adsk.core from ... import config", "event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder,", "= app.userInterface NAME1 = 'Data_Handler' NAME2 = \"Custom Import Event\" NAME3 = \"Custom", "... import config from ...lib import fusion360utils as futil app = adsk.core.Application.get() ui", "app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents have been processed finalize results", "...lib import fusion360utils as futil app = adsk.core.Application.get() ui = app.userInterface NAME1 =", "futil.log(f\"**********Already processed: {data_file.name}\") ... # After all files are processed write the results", "script if data_file.name in config.imported_filenames: try: # Create the public link for the", "custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close, 'custom_event_handler': custom_event_handler_close })", "event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name, False) if", "added: {len(local_handlers)}') futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}') # Executed when add-in is stopped. Remove events.", "from ... 
import config from ...lib import fusion360utils as futil app = adsk.core.Application.get()", "handle_import, name=NAME2) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_import, 'custom_event': custom_event_import, 'custom_event_handler': custom_event_handler_import }) app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save =", "handle_close(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Closing: {file_name}') new_document = config.imported_documents.pop(file_name,", "custom_event_handler_save }) app.unregisterCustomEvent(config.custom_event_id_close) custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id':", "event for this Document event_data = { 'file_name': data_file.name, } additional_info = json.dumps(event_data)", "and garbage collected. local_handlers = [] my_data_handlers = [] my_custom_handlers = [] #", "# Function to be executed by the dataFileComplete event. def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In", "360 import into a new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path) new_document", "Fusion 360 import into a new document import_manager = app.importManager step_options = import_manager.createSTEPImportOptions(file_path)", "Create the public link for the data file public_link = data_file.publicLink futil.log(f\"**********Created public", "document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing a file imported", "event handler for: {args.file.name}') # Get the dataFile and process it # data_file:", "a reference so # they are not released and garbage collected. local_handlers =", "been processed finalize results if len(config.imported_filenames) == 0: if not config.run_finished: config.run_finished =", "the result of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link })", "= config.imported_documents[file_name] new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag') # Close a specific document", "{ 'file_name': data_file.name, } additional_info = json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If", "# After all files are processed write the results def write_results(): futil.log(f\"Writing CSV\")", "# Fire event to save the document event_data = { 'file_name': file_name, 'file_path':", "Event\" # Local list of event handlers used to maintain a reference so", "# Create the event handler for when data files are complete. my_data_handlers.append( futil.add_handler(app.dataFileComplete,", "stopped. Remove events. 
def stop(): futil.log(f'**********local_handlers stop: {len(local_handlers)}') futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}') for custom_item", "def handle_data_file_complete(args: adsk.core.DataEventArgs): futil.log(f'***In application_data_file_complete event handler for: {args.file.name}') # Get the dataFile", "= event_data['file_path'] futil.log(f'**********Importing: {file_name}') # Execute the Fusion 360 import into a new", "result of this file config.results.append({ 'Name': data_file.name, 'URN': data_file.versionId, 'Link': public_link }) config.imported_filenames.remove(data_file.name)", "handle_save(args: adsk.core.CustomEventArgs): event_data = json.loads(args.additionalInfo) file_name = event_data['file_name'] futil.log(f'**********Saving: {file_name}') new_document = config.imported_documents[file_name]", "'custom_event_handler': custom_event_handler_close }) # Create the event handler for when data files are", "events so we don't disrupt the main application loop. def start(): app.unregisterCustomEvent(config.custom_event_id_import) custom_event_import", "custom_event_close = app.registerCustomEvent(config.custom_event_id_close) custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_close, 'custom_event': custom_event_close,", "the document event_data = { 'file_name': file_name, 'file_path': file_path } additional_info = json.dumps(event_data)", "try: # Create the public link for the data file public_link = data_file.publicLink", "for data_handler in my_data_handlers: app.dataFileComplete.remove(data_handler) # Import a document from the list def", "custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event': custom_event_save,", "we are processing a file imported from this script if data_file.name in config.imported_filenames:", "json.dumps(event_data) app.fireCustomEvent(config.custom_event_id_close, additional_info) except: futil.handle_error('process_data_file') # If all documents have been processed finalize", "document.dataFile.isComplete: process_data_file(document.dataFile) # document.close(False) def process_data_file(data_file: adsk.core.DataFile): # Make sure we are processing", "the data file public_link = data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") #", "add-in is run. Create custom events so we don't disrupt the main application", "app.unregisterCustomEvent(config.custom_event_id_save) custom_event_save = app.registerCustomEvent(config.custom_event_id_save) custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3) my_custom_handlers.append({ 'custom_event_id': config.custom_event_id_save, 'custom_event':", "data_file.publicLink futil.log(f\"**********Created public link for {data_file.name}: {public_link}\") # Store the result of this" ]
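
# --- Illustrative sketch (not part of the original add-in) ---
# A minimal example of how a caller could kick off the import/save/close chain
# above: fire the custom import event with the JSON payload that handle_import()
# expects. The file name and path below are hypothetical placeholders.
def fire_example_import():
    payload = json.dumps({
        'file_name': 'example_part.step',        # hypothetical file name
        'file_path': '/tmp/example_part.step',   # hypothetical local path
    })
    # handle_import() reads exactly these two keys from args.additionalInfo.
    app.fireCustomEvent(config.custom_event_id_import, payload)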
<reponame>HemuManju/integrated-gradients-weighted-ica
import collections
from pathlib import Path

import deepdish as dd
import pandas as pd


def nested_dict():
    return collections.defaultdict(nested_dict)


def save_dataset(path, dataset, save):
    """save the dataset.

    Parameters
    ----------
    path : str
        path to save.
    dataset : dataset
        pytorch dataset.
    save : Bool

    """
    save_path = Path(__file__).parents[2] / path
    if save:
        dd.io.save(save_path, dataset)
    return None


def compress_dataset(path):
    """compress the dataset.

    Parameters
    ----------
    path : str
        path to save.
    dataset : dataset
        pytorch dataset.
    save : Bool

    """
    dataset = dd.io.load(path)
    # New name
    file_name = path.split('.')
    file_name[-2] = file_name[-2] + '_compressed.'
    save_path = ''.join(file_name)
    dd.io.save(save_path, dataset, compression=('blosc', 5))
    return None


def save_dataframe(path, dataframe, save):
    save_path = Path(__file__).parents[2] / path
    if save:
        dataframe.to_csv(save_path, index=False)
    return None


def read_dataframe(path):
    read_path = Path(__file__).parents[2] / path
    df = pd.read_csv(read_path)
    return df


def read_dataset(path):
    """Read the dataset.

    Parameters
    ----------
    path : str
        path to save.
    dataset : dataset
        pytorch dataset.
    save : Bool

    """
    read_path = Path(__file__).parents[2] / path
    data = dd.io.load(read_path)
    return data
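
# --- Illustrative usage sketch (not part of the original module) ---
# Round-trips a toy nested dict through save_dataset()/read_dataset() above.
# The relative path is a hypothetical placeholder; both helpers resolve it
# against Path(__file__).parents[2], so that target directory must exist.
if __name__ == '__main__':
    toy_data = {'subject_1': {'signal': [1, 2, 3]}}
    save_dataset('data/processed/toy_dataset.h5', toy_data, save=True)
    restored = read_dataset('data/processed/toy_dataset.h5')
    print(restored['subject_1']['signal'])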
<reponame>RicardoMart922/estudo_Python
import turtle

t = turtle.Turtle()
screen = turtle.Screen()
screen.bgcolor('black')
t.pensize(2)
t.speed(0)

while(True):
    for i in range(6):
        for colors in ['red', 'blue', 'magenta', 'green', 'yellow', 'white']:
            t.color(colors)
            t.circle(100)
            t.left(10)

t.hideturtle()
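
# --- Illustrative bounded variant (not part of the original script) ---
# The while(True) loop above never exits, so t.hideturtle() is unreachable.
# A sketch of the same drawing that terminates after six passes:
#
#     for _ in range(6):
#         for colors in ['red', 'blue', 'magenta', 'green', 'yellow', 'white']:
#             t.color(colors)
#             t.circle(100)
#             t.left(10)
#     t.hideturtle()
#     turtle.done()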
from collections import namedtuple

GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name'])


class Constant:
    BACKEND = 'torch'

    # Data
    VALIDATION_SET_SIZE = 0.08333
    CUTOUT_HOLES = 1
    CUTOUT_RATIO = 0.5

    # Searcher
    MAX_MODEL_NUM = 1000
    BETA = 2.576
    KERNEL_LAMBDA = 1.0
    T_MIN = 0.0001
    N_NEIGHBOURS = 8
    MAX_MODEL_SIZE = (1 << 25)
    MAX_LAYER_WIDTH = 4096
    MAX_LAYERS = 200

    # Grid Dimensions
    LENGTH_DIM = 0
    WIDTH_DIM = 1

    # Default Search Space
    DEFAULT_LENGTH_SEARCH = [50, 75, 100]
    DEFAULT_WIDTH_SEARCH = [64, 128, 256]

    # Model Defaults
    DENSE_DROPOUT_RATE = 0.5
    CONV_DROPOUT_RATE = 0.25
    MLP_DROPOUT_RATE = 0.25
    CONV_BLOCK_DISTANCE = 2
    DENSE_BLOCK_DISTANCE = 1
    MODEL_LEN = 3
    MLP_MODEL_LEN = 3
    MLP_MODEL_WIDTH = 5
    MODEL_WIDTH = 64
    POOLING_KERNEL_SIZE = 2

    # ModelTrainer
    DATA_AUGMENTATION = True
    MAX_ITER_NUM = 200
    MIN_LOSS_DEC = 1e-4
    MAX_NO_IMPROVEMENT_NUM = 5
    MAX_BATCH_SIZE = 128
    LIMIT_MEMORY = False
    SEARCH_MAX_ITER = 200

    # Text Classifier
    BERT_TRAINER_EPOCHS = 4
    BERT_TRAINER_BATCH_SIZE = 32

    # text preprocessor
    EMBEDDING_DIM = 100
    MAX_SEQUENCE_LENGTH = 400
    MAX_NB_WORDS = 5000
    EXTRACT_PATH = "glove/"
    STORE_PATH = ''

    # Download file name
    PRETRAINED_VOCAB_BERT_BASE_UNCASED = \
        GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt')
    PRETRAINED_VOCAB_BERT_BASE_CASED = \
        GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt')
    PRETRAINED_MODEL_BERT_BASE_UNCASED = \
        GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth')
    PRETRAINED_MODEL_BERT_BASE_CASED = \
        GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth')

    # Image Resize
    MAX_IMAGE_SIZE = 128 * 128

    # SYS Constant
    SYS_LINUX = 'linux'
    SYS_WINDOWS = 'windows'
    SYS_GOOGLE_COLAB = 'goog_colab'

    # Google drive downloader
    CHUNK_SIZE = 32768
    DOWNLOAD_URL = "https://docs.google.com/uc?export=download"
MLP_DROPOUT_RATE =", "\"glove/\" STORE_PATH = '' # Download file name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt')", "Download file name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt') PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt')", "collections import namedtuple GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name']) class Constant: BACKEND = 'torch'", "DATA_AUGMENTATION = True MAX_ITER_NUM = 200 MIN_LOSS_DEC = 1e-4 MAX_NO_IMPROVEMENT_NUM = 5 MAX_BATCH_SIZE", "Classifier BERT_TRAINER_EPOCHS = 4 BERT_TRAINER_BATCH_SIZE = 32 # text preprocessor EMBEDDING_DIM = 100", "BERT_TRAINER_BATCH_SIZE = 32 # text preprocessor EMBEDDING_DIM = 100 MAX_SEQUENCE_LENGTH = 400 MAX_NB_WORDS", "Dimensions LENGTH_DIM = 0 WIDTH_DIM = 1 # Default Search Space DEFAULT_LENGTH_SEARCH =", "# ModelTrainer DATA_AUGMENTATION = True MAX_ITER_NUM = 200 MIN_LOSS_DEC = 1e-4 MAX_NO_IMPROVEMENT_NUM =", "text preprocessor EMBEDDING_DIM = 100 MAX_SEQUENCE_LENGTH = 400 MAX_NB_WORDS = 5000 EXTRACT_PATH =", "100] DEFAULT_WIDTH_SEARCH = [64, 128, 256] # Model Defaults DENSE_DROPOUT_RATE = 0.5 CONV_DROPOUT_RATE", "from collections import namedtuple GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name']) class Constant: BACKEND =", "MAX_LAYER_WIDTH = 4096 MAX_LAYERS = 200 # Grid Dimensions LENGTH_DIM = 0 WIDTH_DIM", "GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt') PRETRAINED_MODEL_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') #", "PRETRAINED_MODEL_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize", "= True MAX_ITER_NUM = 200 MIN_LOSS_DEC = 1e-4 MAX_NO_IMPROVEMENT_NUM = 5 MAX_BATCH_SIZE =", "2 DENSE_BLOCK_DISTANCE = 1 MODEL_LEN = 3 MLP_MODEL_LEN = 3 MLP_MODEL_WIDTH = 5", "1 # Default Search Space DEFAULT_LENGTH_SEARCH = [50, 75, 100] DEFAULT_WIDTH_SEARCH = [64,", "PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize MAX_IMAGE_SIZE = 128 * 128", "= [64, 128, 256] # Model Defaults DENSE_DROPOUT_RATE = 0.5 CONV_DROPOUT_RATE = 0.25", "= 1 # Default Search Space DEFAULT_LENGTH_SEARCH = [50, 75, 100] DEFAULT_WIDTH_SEARCH =", "= 2.576 KERNEL_LAMBDA = 1.0 T_MIN = 0.0001 N_NEIGHBOURS = 8 MAX_MODEL_SIZE =", "= 5 MODEL_WIDTH = 64 POOLING_KERNEL_SIZE = 2 # ModelTrainer DATA_AUGMENTATION = True", "# Download file name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt') PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR',", "MAX_MODEL_SIZE = (1 << 25) MAX_LAYER_WIDTH = 4096 MAX_LAYERS = 200 # Grid", "200 # Grid Dimensions LENGTH_DIM = 0 WIDTH_DIM = 1 # Default Search", "0 WIDTH_DIM = 1 # Default Search Space DEFAULT_LENGTH_SEARCH = [50, 75, 100]", "namedtuple('GoogleDriveFile', 
['google_drive_id', 'local_name']) class Constant: BACKEND = 'torch' # Data VALIDATION_SET_SIZE = 0.08333", "file name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt') PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt') PRETRAINED_MODEL_BERT_BASE_UNCASED", "128 # SYS Constant SYS_LINUX = 'linux' SYS_WINDOWS = 'windows' SYS_GOOGLE_COLAB = 'goog_colab'", "= 0.08333 CUTOUT_HOLES = 1 CUTOUT_RATIO = 0.5 # Searcher MAX_MODEL_NUM = 1000", "# Grid Dimensions LENGTH_DIM = 0 WIDTH_DIM = 1 # Default Search Space", "= \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt') PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt') PRETRAINED_MODEL_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG',", "SYS Constant SYS_LINUX = 'linux' SYS_WINDOWS = 'windows' SYS_GOOGLE_COLAB = 'goog_colab' # Google", "2.576 KERNEL_LAMBDA = 1.0 T_MIN = 0.0001 N_NEIGHBOURS = 8 MAX_MODEL_SIZE = (1", "Search Space DEFAULT_LENGTH_SEARCH = [50, 75, 100] DEFAULT_WIDTH_SEARCH = [64, 128, 256] #", "0.0001 N_NEIGHBOURS = 8 MAX_MODEL_SIZE = (1 << 25) MAX_LAYER_WIDTH = 4096 MAX_LAYERS", "MAX_NB_WORDS = 5000 EXTRACT_PATH = \"glove/\" STORE_PATH = '' # Download file name", "0.08333 CUTOUT_HOLES = 1 CUTOUT_RATIO = 0.5 # Searcher MAX_MODEL_NUM = 1000 BETA", "Searcher MAX_MODEL_NUM = 1000 BETA = 2.576 KERNEL_LAMBDA = 1.0 T_MIN = 0.0001", "False SEARCH_MAX_ITER = 200 # Text Classifier BERT_TRAINER_EPOCHS = 4 BERT_TRAINER_BATCH_SIZE = 32", "\\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize MAX_IMAGE_SIZE =", "\\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize MAX_IMAGE_SIZE = 128 * 128 # SYS", "name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt') PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt') PRETRAINED_MODEL_BERT_BASE_UNCASED =", "= 200 MIN_LOSS_DEC = 1e-4 MAX_NO_IMPROVEMENT_NUM = 5 MAX_BATCH_SIZE = 128 LIMIT_MEMORY =", "1 MODEL_LEN = 3 MLP_MODEL_LEN = 3 MLP_MODEL_WIDTH = 5 MODEL_WIDTH = 64", "= 1 MODEL_LEN = 3 MLP_MODEL_LEN = 3 MLP_MODEL_WIDTH = 5 MODEL_WIDTH =", "= 'linux' SYS_WINDOWS = 'windows' SYS_GOOGLE_COLAB = 'goog_colab' # Google drive downloader CHUNK_SIZE", "MLP_MODEL_LEN = 3 MLP_MODEL_WIDTH = 5 MODEL_WIDTH = 64 POOLING_KERNEL_SIZE = 2 #", "GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize MAX_IMAGE_SIZE = 128", "# Searcher MAX_MODEL_NUM = 1000 BETA = 2.576 KERNEL_LAMBDA = 1.0 T_MIN =", "MAX_LAYERS = 200 # Grid Dimensions LENGTH_DIM = 0 WIDTH_DIM = 1 #", "= 0 WIDTH_DIM = 1 # Default Search Space DEFAULT_LENGTH_SEARCH = [50, 75,", "import namedtuple GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name']) class Constant: BACKEND = 'torch' #", "= 1.0 T_MIN = 0.0001 N_NEIGHBOURS = 8 MAX_MODEL_SIZE = (1 << 
25)", "MODEL_WIDTH = 64 POOLING_KERNEL_SIZE = 2 # ModelTrainer DATA_AUGMENTATION = True MAX_ITER_NUM =", "= \"glove/\" STORE_PATH = '' # Download file name PRETRAINED_VOCAB_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj',", "= 3 MLP_MODEL_WIDTH = 5 MODEL_WIDTH = 64 POOLING_KERNEL_SIZE = 2 # ModelTrainer", "32 # text preprocessor EMBEDDING_DIM = 100 MAX_SEQUENCE_LENGTH = 400 MAX_NB_WORDS = 5000", "= \\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth') # Image Resize MAX_IMAGE_SIZE", "SYS_WINDOWS = 'windows' SYS_GOOGLE_COLAB = 'goog_colab' # Google drive downloader CHUNK_SIZE = 32768", "['google_drive_id', 'local_name']) class Constant: BACKEND = 'torch' # Data VALIDATION_SET_SIZE = 0.08333 CUTOUT_HOLES", "= (1 << 25) MAX_LAYER_WIDTH = 4096 MAX_LAYERS = 200 # Grid Dimensions", "5000 EXTRACT_PATH = \"glove/\" STORE_PATH = '' # Download file name PRETRAINED_VOCAB_BERT_BASE_UNCASED =", "3 MLP_MODEL_WIDTH = 5 MODEL_WIDTH = 64 POOLING_KERNEL_SIZE = 2 # ModelTrainer DATA_AUGMENTATION", "128 LIMIT_MEMORY = False SEARCH_MAX_ITER = 200 # Text Classifier BERT_TRAINER_EPOCHS = 4", "'torch' # Data VALIDATION_SET_SIZE = 0.08333 CUTOUT_HOLES = 1 CUTOUT_RATIO = 0.5 #", "Grid Dimensions LENGTH_DIM = 0 WIDTH_DIM = 1 # Default Search Space DEFAULT_LENGTH_SEARCH", "PRETRAINED_VOCAB_BERT_BASE_CASED = \\ GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt') PRETRAINED_MODEL_BERT_BASE_UNCASED = \\ GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth') PRETRAINED_MODEL_BERT_BASE_CASED = \\", "200 # Text Classifier BERT_TRAINER_EPOCHS = 4 BERT_TRAINER_BATCH_SIZE = 32 # text preprocessor", "8 MAX_MODEL_SIZE = (1 << 25) MAX_LAYER_WIDTH = 4096 MAX_LAYERS = 200 #", "= 0.0001 N_NEIGHBOURS = 8 MAX_MODEL_SIZE = (1 << 25) MAX_LAYER_WIDTH = 4096", "DEFAULT_LENGTH_SEARCH = [50, 75, 100] DEFAULT_WIDTH_SEARCH = [64, 128, 256] # Model Defaults", "= 64 POOLING_KERNEL_SIZE = 2 # ModelTrainer DATA_AUGMENTATION = True MAX_ITER_NUM = 200", "25) MAX_LAYER_WIDTH = 4096 MAX_LAYERS = 200 # Grid Dimensions LENGTH_DIM = 0", "= 400 MAX_NB_WORDS = 5000 EXTRACT_PATH = \"glove/\" STORE_PATH = '' # Download" ]
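The constants above are plain class attributes, so consumers read them directly from Constant without instantiating it; each GoogleDriveFile entry pairs a Google Drive file id with the local file name it should be saved under. A minimal usage sketch, assuming only the module above: the download_vocab and training_settings helpers are hypothetical and exist purely to illustrate how the values are accessed.

# Hedged usage sketch: only reads values defined in Constant above.
# `download_vocab` and `training_settings` are hypothetical helpers,
# not part of the original module.
def download_vocab():
    vocab = Constant.PRETRAINED_VOCAB_BERT_BASE_UNCASED
    # A real downloader would request DOWNLOAD_URL with this id,
    # streaming the response in CHUNK_SIZE pieces.
    print('file id:', vocab.google_drive_id, '-> local name:', vocab.local_name)


def training_settings():
    # Typical knobs a BERT trainer would read from the Constant namespace.
    return {
        'epochs': Constant.BERT_TRAINER_EPOCHS,          # 4
        'batch_size': Constant.BERT_TRAINER_BATCH_SIZE,  # 32
        'max_sequence_length': Constant.MAX_SEQUENCE_LENGTH,
    }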
[ "path specification object (instance of path.PathSpec). is_root: optional boolean value to indicate if", "abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements the VFS directory", "\"\"\"The full path of the linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self):", "include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\"", "\"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file", "case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry =", "Args: file_system: the file system object (instance of vfs.FileSystem). path_spec: the path specification", "getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError( u'Invalid file system missing", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self):", "entry is allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def", "in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator of instance of", "entry is a virtual file entry emulated by the corresponding file system. The", "def IsPipe(self): \"\"\"Determines if the file entry is a pipe.\"\"\" if self._stat_object is", "optional boolean value to indicate if the file entry is a virtual file", "regular file, a directory or file system metadata. \"\"\" import abc from dfvfs.resolver", "return self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\" if self._stat_object", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry is", "full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if self._directory", "entries (generator of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class", "is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self):", "self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of", "(instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None self._file_system = file_system self.path_spec", "path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system =", "use len(self._directory.entries) since entries is a generator. 
return sum(1 for path_spec in self._directory.entries)", "entry class FileEntry(object): \"\"\"Class that implements the VFS file entry object interface.\"\"\" def", "for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if not case_sensitive", "path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements the", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if", "if the file entry is a link.\"\"\" if self._stat_object is None: self._stat_object =", "allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines", "def IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\" if self._stat_object is None:", "if type_indicator is None: raise NotImplementedError( u'Invalid file system missing type indicator.') return", "number of sub file entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory() if", "is a pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "u'' @abc.abstractproperty def name(self): \"\"\"The name of the file entry, which does not", "missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of", "a file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE", "@abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator of instance of vfs.FileEntry).\"\"\" @property", "def _GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The", "self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry is virtual (emulated", "specification object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None self._file_system =", "\"\"\"The number of sub file entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory()", "self._GetDirectory() if self._directory is None: return 0 # We cannot use len(self._directory.entries) since", "_EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can contain a vast number of", "A path specification (instance of path.PathSpec). \"\"\" @property def entries(self): \"\"\"The entries (generator", "is False. \"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system = file_system self._is_root =", "NotImplementedError( u'Invalid file system missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the", "for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\"", "the file entry is a pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "def IsVirtual(self): \"\"\"Determines if the file entry is virtual (emulated by dfVFS).\"\"\" return", "IsRoot(self): \"\"\"Determines if the file entry is the root file entry.\"\"\" return self._is_root", "self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\"", "is a generator. 
return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The", "dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements the VFS directory object interface.\"\"\"", "the file entry is the root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines", "of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance", "return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry is the", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines", "is False. is_virtual: optional boolean value to indicate if the file entry is", "IsVirtual(self): \"\"\"Determines if the file entry is virtual (emulated by dfVFS).\"\"\" return self._is_virtual", "(instance of path.PathSpec). is_root: optional boolean value to indicate if the file entry", "the corresponding file system. The default is False. is_virtual: optional boolean value to", "-*- coding: utf-8 -*- \"\"\"The Virtual File System (VFS) file entry object interface.", "\"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object =", "self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can", "type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None:", "various file system elements like a regular file, a directory or file system", "path of the linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name", "\"\"\"The sub file entries (generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The", "(instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system", "GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def", "a generator is more memory efficient. Yields: A path specification (instance of path.PathSpec).", "file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. 
Args: resolver_context: the resolver", "if the file entry is a virtual file entry emulated by the corresponding", "of sub file entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory() if self._directory", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the", "is a directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the", "\"\"\"Retrieves a sub file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None", "name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the", "link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def", "self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry is a directory.\"\"\"", "False. \"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system = file_system self._is_root = is_root", "of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements", "self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close()", "self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry is a device.\"\"\"", "return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject(", "file entry is the root file entry of the corresponding file system. The", "type_indicator is None: raise NotImplementedError( u'Invalid file system missing type indicator.') return type_indicator", "\"\"\"Determines if the file entry is allocated.\"\"\" if self._stat_object is None: self._stat_object =", "None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry", "name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name ==", "= self._GetDirectory() if self._directory is None: return 0 # We cannot use len(self._directory.entries)", "entry of the corresponding file system. The default is False. is_virtual: optional boolean", "def IsSocket(self): \"\"\"Determines if the file entry is a socket.\"\"\" if self._stat_object is", "of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self,", "def GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None:", "\"\"\"Determines if the file entry is a device.\"\"\" if self._stat_object is None: self._stat_object", "efficient. Yields: A path specification (instance of path.PathSpec). 
\"\"\" @property def entries(self): \"\"\"The", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self):", "a device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE", "interface. The file entry can be various file system elements like a regular", "= None self._file_system = file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context =", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the", "be various file system elements like a regular file, a directory or file", "vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if", "__init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system: the file system object", "(instance of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). \"\"\" super(Directory,", "yield entry class FileEntry(object): \"\"\"Class that implements the VFS file entry object interface.\"\"\"", "if the file entry is the root file entry of the corresponding file", "self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry is", "@property def entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for entry in", "super(FileEntry, self).__init__() self._directory = None self._file_system = file_system self._is_root = is_root self._is_virtual =", "def GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self):", "self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file", "a regular file, a directory or file system metadata. \"\"\" import abc from", "file system missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object", "the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object", "== self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry is a pipe.\"\"\" if", "entry is the root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the", "using a generator is more memory efficient. Yields: A path specification (instance of", "a sub file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for", "sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if not case_sensitive and", "e.g. 
for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file", "not case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return", "super(Directory, self).__init__() self._entries = None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def", "from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements the VFS directory object", "(generator of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class FileEntry(object):", "sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry =", "class FileEntry(object): \"\"\"Class that implements the VFS file entry object interface.\"\"\" def __init__(", "since entries is a generator. return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def", "of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. for", "file entry is a file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in", "self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry is virtual (emulated by dfVFS).\"\"\"", "instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR',", "is a socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def", "path_spec: the path specification object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries =", "object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object", "does not include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub", "file entry is allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated", "\"\"\"Determines if the file entry is the root file entry.\"\"\" return self._is_root def", "IsSocket(self): \"\"\"Determines if the file entry is a socket.\"\"\" if self._stat_object is None:", "GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. for a symbolic link.\"\"\" return @abc.abstractmethod", "type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError( u'Invalid file", "entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object", "= None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry", "default is False. \"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system = file_system self._is_root", "path specification object (instance of path.PathSpec). 
\"\"\" super(Directory, self).__init__() self._entries = None self._file_system", "self._is_root def IsSocket(self): \"\"\"Determines if the file entry is a socket.\"\"\" if self._stat_object", "entry, e.g. for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent", "name of the file entry, which does not include the full path.\"\"\" @property", "entries using a generator is more memory efficient. Yields: A path specification (instance", "IsPipe(self): \"\"\"Determines if the file entry is a pipe.\"\"\" if self._stat_object is None:", "the file entry, which does not include the full path.\"\"\" @property def number_of_sub_file_entries(self):", "object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance", "entry emulated by the corresponding file system. The default is False. \"\"\" super(FileEntry,", "is a link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\" if self._stat_object is None: self._stat_object", "a link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK", "of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None self._file_system = file_system self.path_spec =", "the file entry is a link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "if sub_file_entry.name == name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower:", "vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. for a", "path_spec): \"\"\"Initializes the directory object. Args: file_system: the file system object (instance of", "is_root: optional boolean value to indicate if the file entry is the root", "entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name of the file entry, which", "\"\"\"Determines if the file entry is a socket.\"\"\" if self._stat_object is None: self._stat_object", "entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\" name_lower", "directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system:", "for entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements the VFS", "is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec = path_spec", "virtual file entry emulated by the corresponding file system. The default is False.", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry", "elements like a regular file, a directory or file system metadata. 
\"\"\" import", "of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of the linked file entry.\"\"\"", "link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name,", "is allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self):", "@property def link(self): \"\"\"The full path of the linked file entry.\"\"\" return u''", "of the file entry, which does not include the full path.\"\"\" @property def", "(instance of path.PathSpec). \"\"\" @property def entries(self): \"\"\"The entries (generator of instance of", "parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry by", "corresponding file system. The default is False. is_virtual: optional boolean value to indicate", "None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry object.\"\"\"", "self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator of instance of vfs.FileEntry).\"\"\"", "that implements the VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the", "matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry", "the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if", "\"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self):", "The default is False. \"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system = file_system", "system missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance", "metadata. \"\"\" import abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements", "self._file_system = file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object", "def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can contain a vast number", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry is", "entry is a link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "file entry, e.g. 
for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the", "vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property def", "file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the", "file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the file entry is a", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry", "Yields: A path specification (instance of path.PathSpec). \"\"\" @property def entries(self): \"\"\"The entries", "resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return self._file_system def", "path_spec: the path specification object (instance of path.PathSpec). is_root: optional boolean value to", "# We cannot use len(self._directory.entries) since entries is a generator. return sum(1 for", "entries. Since a directory can contain a vast number of entries using a", "full path of the linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The", "generator. return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file", "is_virtual=False): \"\"\"Initializes the file entry object. Args: resolver_context: the resolver context (instance of", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines", "Virtual File System (VFS) file entry object interface. The file entry can be", "like a regular file, a directory or file system metadata. \"\"\" import abc", "interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry", "name(self): \"\"\"The name of the file entry, which does not include the full", "self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry is a link.\"\"\" if self._stat_object", "specification (instance of path.PathSpec). 
\"\"\" @property def entries(self): \"\"\"The entries (generator of instance", "self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry is a pipe.\"\"\"", "@abc.abstractproperty def name(self): \"\"\"The name of the file entry, which does not include", "is a file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self):", "(generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator =", "file entry, which does not include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The", "in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements the VFS file entry", "the file entry is a device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "file entry is a socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance of", "\"\"\"Determines if the file entry is a file.\"\"\" if self._stat_object is None: self._stat_object", "\"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path", "if not case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry", "def IsFile(self): \"\"\"Determines if the file entry is a file.\"\"\" if self._stat_object is", "indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError( u'Invalid", "object (instance of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). \"\"\"", "= file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object =", "return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry is a", "IsFile(self): \"\"\"Determines if the file entry is a file.\"\"\" if self._stat_object is None:", "if self._directory is None: self._directory = self._GetDirectory() if self._directory is None: return 0", "memory efficient. Yields: A path specification (instance of path.PathSpec). 
\"\"\" @property def entries(self):", "directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file", "indicate if the file entry is the root file entry of the corresponding", "VFS file entry object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False):", "sub file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry", "sub file entries (generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type", "entry is a device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "the VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object.", "self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\" if", "matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance", "Since a directory can contain a vast number of entries using a generator", "can contain a vast number of entries using a generator is more memory", "entry can be various file system elements like a regular file, a directory", "\"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system = file_system self._is_root = is_root self._is_virtual", "the file entry is a directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry is a link.\"\"\"", "== name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower: if not", "path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator of instance", "the path specification object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None", "object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file", "def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is", "boolean value to indicate if the file entry is the root file entry", "if the file entry is a directory.\"\"\" if self._stat_object is None: self._stat_object =", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry is", "entry object interface. The file entry can be various file system elements like", "file system elements like a regular file, a directory or file system metadata.", "more memory efficient. Yields: A path specification (instance of path.PathSpec). \"\"\" @property def", "def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. 
for a symbolic link.\"\"\" return", "name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return", "file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance of", "number of entries using a generator is more memory efficient. Yields: A path", "\"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry", "of path.PathSpec). is_root: optional boolean value to indicate if the file entry is", "path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can contain a", "directory entries. Since a directory can contain a vast number of entries using", "emulated by the corresponding file system. The default is False. \"\"\" super(FileEntry, self).__init__()", "return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry is a", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry", "def IsDevice(self): \"\"\"Determines if the file entry is a device.\"\"\" if self._stat_object is", "\"\"\"Determines if the file entry is a link.\"\"\" if self._stat_object is None: self._stat_object", "directory object. Args: file_system: the file system object (instance of vfs.FileSystem). path_spec: the", "None: self._directory = self._GetDirectory() if self._directory is None: return 0 # We cannot", "the file entry is a socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "__del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def", "which does not include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of", "root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the file entry is", "None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if not", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file", "interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system: the file", "None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry", "object (instance of path.PathSpec). is_root: optional boolean value to indicate if the file", "entries is a generator. return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self):", "(instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object def", "is more memory efficient. Yields: A path specification (instance of path.PathSpec). \"\"\" @property", "import resolver class Directory(object): \"\"\"Class that implements the VFS directory object interface.\"\"\" def", "if the file entry is allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). 
is_root: optional boolean", "= path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system", "None self._file_system = file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context = resolver_context", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the", "self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry is the root", "def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a", "root file entry of the corresponding file system. The default is False. is_virtual:", "self).__init__() self._directory = None self._file_system = file_system self._is_root = is_root self._is_virtual = is_virtual", "entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements the VFS file", "indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return", "specification object (instance of path.PathSpec). is_root: optional boolean value to indicate if the", "The file entry can be various file system elements like a regular file,", "the stat object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of", "@abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self):", "None: raise NotImplementedError( u'Invalid file system missing type indicator.') return type_indicator def GetFileObject(self):", "file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name of the file entry,", "= None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if", "file entry is the root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if", "is a virtual file entry emulated by the corresponding file system. The default", "object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file", "@abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property def link(self):", "resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return", "boolean value to indicate if the file entry is a virtual file entry", "self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry is the root file entry.\"\"\"", "def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\" name_lower =", "self._resolver_context = resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans", "file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory", "self._directory = None self._file_system = file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context", "file system metadata. 
\"\"\" import abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class", "name, case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry", "link(self): \"\"\"The full path of the linked file entry.\"\"\" return u'' @abc.abstractproperty def", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file", "to indicate if the file entry is a virtual file entry emulated by", "system elements like a regular file, a directory or file system metadata. \"\"\"", "file system object (instance of vfs.FileSystem). path_spec: the path specification object (instance of", "IsDirectory(self): \"\"\"Determines if the file entry is a directory.\"\"\" if self._stat_object is None:", "self._entries = None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves", "or file system metadata. \"\"\" import abc from dfvfs.resolver import resolver class Directory(object):", "@abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves", "return 0 # We cannot use len(self._directory.entries) since entries is a generator. return", "of resolver.Context). file_system: the file system object (instance of vfs.FileSystem). path_spec: the path", "file system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file", "file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system: the file system object (instance", "implements the VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory", "\"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the", "file entry is a device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries.", "resolver_context: the resolver context (instance of resolver.Context). file_system: the file system object (instance", "the resolver context (instance of resolver.Context). file_system: the file system object (instance of", "-*- \"\"\"The Virtual File System (VFS) file entry object interface. The file entry", "the directory object. Args: file_system: the file system object (instance of vfs.FileSystem). 
path_spec:", "value to indicate if the file entry is the root file entry of", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the", "self._directory is None: return 0 # We cannot use len(self._directory.entries) since entries is", "of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None)", "a directory can contain a vast number of entries using a generator is", "is a device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type ==", "self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry is a directory.\"\"\" if self._stat_object", "def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context)", "by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if", "can be various file system elements like a regular file, a directory or", "== self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry is virtual (emulated by", "utf-8 -*- \"\"\"The Virtual File System (VFS) file entry object interface. The file", "if the file entry is a device.\"\"\" if self._stat_object is None: self._stat_object =", "None) if type_indicator is None: raise NotImplementedError( u'Invalid file system missing type indicator.')", "(instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g.", "@property def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if self._directory is None:", "generator is more memory efficient. Yields: A path specification (instance of path.PathSpec). \"\"\"", "import abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements the VFS", "self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() ==", "== self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the file entry is a link.\"\"\" if", "self._directory = self._GetDirectory() if self._directory is None: return 0 # We cannot use", "file_system self._is_root = is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object = None", "resolver class Directory(object): \"\"\"Class that implements the VFS directory object interface.\"\"\" def __init__(self,", "the file entry is the root file entry of the corresponding file system.", "(VFS) file entry object interface. The file entry can be various file system", "device.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def", "resolver.Context). file_system: the file system object (instance of vfs.FileSystem). path_spec: the path specification", "system. The default is False. is_virtual: optional boolean value to indicate if the", "self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry is a pipe.\"\"\" if self._stat_object", "\"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise", "the linked file entry, e.g. 
for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self):", "stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "\"\"\"The Virtual File System (VFS) file entry object interface. The file entry can", "False. is_virtual: optional boolean value to indicate if the file entry is a", "is None: return 0 # We cannot use len(self._directory.entries) since entries is a", "file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\"", "self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry is a file.\"\"\" if self._stat_object", "def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object.", "@property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator", "entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the file entry is a socket.\"\"\"", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry", "the root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the file entry", "vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of the linked file entry.\"\"\" return", "the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry", "return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the file entry is virtual", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if", "the file system object (instance of vfs.FileSystem). path_spec: the path specification object (instance", "if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat", "file entry object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes", "== self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry is a file.\"\"\" if", "= sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\"", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines", "return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. for a symbolic", "file entry object interface. The file entry can be various file system elements", "# -*- coding: utf-8 -*- \"\"\"The Virtual File System (VFS) file entry object", "stat object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of the", "def name(self): \"\"\"The name of the file entry, which does not include the", "the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if", "value to indicate if the file entry is a virtual file entry emulated", "is the root file entry of the corresponding file system. The default is", "of the corresponding file system. The default is False. 
is_virtual: optional boolean value", "None: return 0 # We cannot use len(self._directory.entries) since entries is a generator.", "entry, which does not include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number", "\"\"\"Retrieves directory entries. Since a directory can contain a vast number of entries", "self).__init__() self._entries = None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self):", "case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry", "by the corresponding file system. The default is False. \"\"\" super(FileEntry, self).__init__() self._directory", "directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance", "= file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a", "a directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry is", "\"\"\"Determines if the file entry is a pipe.\"\"\" if self._stat_object is None: self._stat_object", "self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry is", "System (VFS) file entry object interface. The file entry can be various file", "entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self):", "the file entry is allocated.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "raise NotImplementedError( u'Invalid file system missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves", "= self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry is a", "the file entry is a file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat()", "of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property", "optional boolean value to indicate if the file entry is the root file", "file entry emulated by the corresponding file system. The default is False. \"\"\"", "sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if", "file entry is a pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "of path.PathSpec). 
\"\"\" @property def entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\"", "object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of the linked", "self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system = None", "path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. Args: resolver_context: the resolver context", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if the", "and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def", "\"\"\" import abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that implements the", "linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name of the file", "self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec)", "entry is a directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "= None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod", "Args: resolver_context: the resolver context (instance of resolver.Context). file_system: the file system object", "system object (instance of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec).", "\"\"\"Class that implements the VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes", "(instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\"", "path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if self._directory is", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines if the", "= resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def __del__(self): \"\"\"Cleans up", "def sub_file_entries(self): \"\"\"The sub file entries (generator of instance of vfs.FileEntry).\"\"\" @property def", "file entries (generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\"", "object. Args: resolver_context: the resolver context (instance of resolver.Context). file_system: the file system", "up the file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves", "object interface. The file entry can be various file system elements like a", "the path specification object (instance of path.PathSpec). is_root: optional boolean value to indicate", "(instance of resolver.Context). file_system: the file system object (instance of vfs.FileSystem). 
path_spec: the", "of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class", "for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries (generator of", "the linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name of the", "u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError( u'Invalid file system missing type", "file_system: the file system object (instance of vfs.FileSystem). path_spec: the path specification object", "\"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the stat", "a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self): \"\"\"Determines", "not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object", "is the root file entry.\"\"\" return self._is_root def IsSocket(self): \"\"\"Determines if the file", "return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True):", "system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry,", "path.PathSpec). is_root: optional boolean value to indicate if the file entry is the", "entry object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the", "resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. Args: resolver_context: the", "return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry is a", "= getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError( u'Invalid file system", "a virtual file entry emulated by the corresponding file system. The default is", "the VFS file entry object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec, is_root=False,", "= path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can contain", "name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name", "\"\"\"Class that implements the VFS file entry object interface.\"\"\" def __init__( self, resolver_context,", "len(self._directory.entries) since entries is a generator. return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty", "file entry can be various file system elements like a regular file, a", "file system. The default is False. 
\"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the file entry is", "def IsDirectory(self): \"\"\"Determines if the file entry is a directory.\"\"\" if self._stat_object is", "def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves", "@abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since a directory can contain a vast", "def __del__(self): \"\"\"Cleans up the file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod", "GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub file entry by name.\"\"\" name_lower = name.lower()", "not include the full path.\"\"\" @property def number_of_sub_file_entries(self): \"\"\"The number of sub file", "entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory() if self._directory is None: return", "self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that implements the VFS file entry object", "Directory(object): \"\"\"Class that implements the VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec):", "sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if", "the corresponding file system. The default is False. \"\"\" super(FileEntry, self).__init__() self._directory =", "GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves", "is None: raise NotImplementedError( u'Invalid file system missing type indicator.') return type_indicator def", "self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\" if self._stat_object is", "\"\"\"Initializes the directory object. Args: file_system: the file system object (instance of vfs.FileSystem).", "a directory or file system metadata. \"\"\" import abc from dfvfs.resolver import resolver", "if the file entry is the root file entry.\"\"\" return self._is_root def IsSocket(self):", "IsLink(self): \"\"\"Determines if the file entry is a link.\"\"\" if self._stat_object is None:", "type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\"", "def link(self): \"\"\"The full path of the linked file entry.\"\"\" return u'' @abc.abstractproperty", "file, a directory or file system metadata. \"\"\" import abc from dfvfs.resolver import", "path specification (instance of path.PathSpec). 
\"\"\" @property def entries(self): \"\"\"The entries (generator of", "self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\" return self._file_system", "self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry is", "a pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE", "file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory", "GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET def IsVirtual(self):", "if the file entry is a file.\"\"\" if self._stat_object is None: self._stat_object =", "_GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def _GetStat(self): \"\"\"Retrieves the", "cannot use len(self._directory.entries) since entries is a generator. return sum(1 for path_spec in", "context (instance of resolver.Context). file_system: the file system object (instance of vfs.FileSystem). path_spec:", "vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries", "= None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory", "matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object is", "self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry is a file.\"\"\"", "return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file entry is a", "indicate if the file entry is a virtual file entry emulated by the", "sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self):", "\"\"\" super(Directory, self).__init__() self._entries = None self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod", "default is False. is_virtual: optional boolean value to indicate if the file entry", "self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. 
Args: resolver_context:", "self._is_root = is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec", "the file entry object.\"\"\" self._file_system.Close() self._file_system = None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the", "\"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self):", "u'Invalid file system missing type indicator.') return type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like", "entry is a file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if the", "object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None self._file_system = file_system", "def IsRoot(self): \"\"\"Determines if the file entry is the root file entry.\"\"\" return", "directory can contain a vast number of entries using a generator is more", "vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). is_root: optional boolean value", "GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self, name, case_sensitive=True): \"\"\"Retrieves a sub", "directory or file system metadata. \"\"\" import abc from dfvfs.resolver import resolver class", "name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry:", "is None: self._directory = self._GetDirectory() if self._directory is None: return 0 # We", "of entries using a generator is more memory efficient. Yields: A path specification", "the root file entry of the corresponding file system. The default is False.", "resolver context (instance of resolver.Context). file_system: the file system object (instance of vfs.FileSystem).", "in self.sub_file_entries: if sub_file_entry.name == name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower()", "= self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry", "entry is a socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "_GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full", "self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry is a device.\"\"\" if self._stat_object", "self._file_system = file_system self.path_spec = path_spec @abc.abstractmethod def _EntriesGenerator(self): \"\"\"Retrieves directory entries. Since", "to indicate if the file entry is the root file entry of the", "return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves the file system (instance of vfs.FileSystem).\"\"\"", "= name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries: if sub_file_entry.name == name:", "object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system: the", "if the file entry is a pipe.\"\"\" if self._stat_object is None: self._stat_object =", "linked file entry, e.g. 
for a symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves", "= self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the file entry is allocated.\"\"\"", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines if", "return self._is_root def IsSocket(self): \"\"\"Determines if the file entry is a socket.\"\"\" if", "(instance of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). is_root: optional", "matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance of", "file system. The default is False. is_virtual: optional boolean value to indicate if", "symbolic link.\"\"\" return @abc.abstractmethod def GetParentFileEntry(self): \"\"\"Retrieves the parent file entry.\"\"\" def GetSubFileEntryByName(self,", "entries (generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self): \"\"\"The type indicator.\"\"\" type_indicator", "file entry is a directory.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "entry by name.\"\"\" name_lower = name.lower() matching_sub_file_entry = None for sub_file_entry in self.sub_file_entries:", "sub file entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory() if self._directory is", "\"\"\" @property def entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for entry", "coding: utf-8 -*- \"\"\"The Virtual File System (VFS) file entry object interface. The", "\"\"\"The name of the file entry, which does not include the full path.\"\"\"", "the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec, resolver_context=self._resolver_context) def GetFileSystem(self): \"\"\"Retrieves", "self._directory is None: self._directory = self._GetDirectory() if self._directory is None: return 0 #", "vast number of entries using a generator is more memory efficient. Yields: A", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self):", "is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. Args: resolver_context: the resolver context (instance", "the file system (instance of vfs.FileSystem).\"\"\" return self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked", "type_indicator def GetFileObject(self): \"\"\"Retrieves the file-like object (instance of file_io.FileIO).\"\"\" return resolver.Resolver.OpenFileObject( self.path_spec,", "entry is a pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type", "system metadata. \"\"\" import abc from dfvfs.resolver import resolver class Directory(object): \"\"\"Class that", "path.PathSpec). \"\"\" super(Directory, self).__init__() self._entries = None self._file_system = file_system self.path_spec = path_spec", "system. The default is False. \"\"\" super(FileEntry, self).__init__() self._directory = None self._file_system =", "corresponding file system. The default is False. 
\"\"\" super(FileEntry, self).__init__() self._directory = None", "None @abc.abstractmethod def _GetDirectory(self): \"\"\"Retrieves the directory object (instance of vfs.Directory).\"\"\" @abc.abstractmethod def", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self): \"\"\"Determines if the file", "def number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if self._directory is None: self._directory", "return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves the stat object (instance of vfs.VFSStat).\"\"\" if self._stat_object", "\"\"\"Initializes the file entry object. Args: resolver_context: the resolver context (instance of resolver.Context).", "return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower: if not matching_sub_file_entry: matching_sub_file_entry", "\"\"\"Determines if the file entry is a directory.\"\"\" if self._stat_object is None: self._stat_object", "entry object. Args: resolver_context: the resolver context (instance of resolver.Context). file_system: the file", "a vast number of entries using a generator is more memory efficient. Yields:", "file entries.\"\"\" if self._directory is None: self._directory = self._GetDirectory() if self._directory is None:", "the file entry is a virtual file entry emulated by the corresponding file", "a socket.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_SOCKET", "(instance of vfs.VFSStat).\"\"\" @property def link(self): \"\"\"The full path of the linked file", "= is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec = path_spec self._file_system.Open(path_spec=path_spec) def", "object (instance of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). is_root:", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def IsLink(self): \"\"\"Determines if", "return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file entry is a device.\"\"\" if", "file entry is a link.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return", "type indicator.\"\"\" type_indicator = getattr(self, u'TYPE_INDICATOR', None) if type_indicator is None: raise NotImplementedError(", "We cannot use len(self._directory.entries) since entries is a generator. return sum(1 for path_spec", "== self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the file entry is a directory.\"\"\" if", "\"\"\"Retrieves the linked file entry, e.g. for a symbolic link.\"\"\" return @abc.abstractmethod def", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_LINK def IsPipe(self): \"\"\"Determines", "that implements the VFS file entry object interface.\"\"\" def __init__( self, resolver_context, file_system,", "return u'' @abc.abstractproperty def name(self): \"\"\"The name of the file entry, which does", "is_virtual: optional boolean value to indicate if the file entry is a virtual", "File System (VFS) file entry object interface. 
The file entry can be various", "sub_file_entry.name == name: return sub_file_entry if not case_sensitive and sub_file_entry.name.lower() == name_lower: if", "number_of_sub_file_entries(self): \"\"\"The number of sub file entries.\"\"\" if self._directory is None: self._directory =", "__init__( self, resolver_context, file_system, path_spec, is_root=False, is_virtual=False): \"\"\"Initializes the file entry object. Args:", "None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if the", "== self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file entry is the root file", "of the linked file entry.\"\"\" return u'' @abc.abstractproperty def name(self): \"\"\"The name of", "sub_file_entries(self): \"\"\"The sub file entries (generator of instance of vfs.FileEntry).\"\"\" @property def type_indicator(self):", "if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DIRECTORY def IsFile(self):", "is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines if", "if the file entry is a socket.\"\"\" if self._stat_object is None: self._stat_object =", "return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub file entries", "entry is the root file entry of the corresponding file system. The default", "if self._directory is None: return 0 # We cannot use len(self._directory.entries) since entries", "the file entry object. Args: resolver_context: the resolver context (instance of resolver.Context). file_system:", "contain a vast number of entries using a generator is more memory efficient.", "file entry object. Args: resolver_context: the resolver context (instance of resolver.Context). file_system: the", "FileEntry(object): \"\"\"Class that implements the VFS file entry object interface.\"\"\" def __init__( self,", "file entry of the corresponding file system. The default is False. is_virtual: optional", "is None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines if the file", "implements the VFS file entry object interface.\"\"\" def __init__( self, resolver_context, file_system, path_spec,", "a generator. return sum(1 for path_spec in self._directory.entries) @abc.abstractproperty def sub_file_entries(self): \"\"\"The sub", "def IsLink(self): \"\"\"Determines if the file entry is a link.\"\"\" if self._stat_object is", "VFS directory object interface.\"\"\" def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args:", "is None: self._stat_object = self._GetStat() return self._stat_object.is_allocated def IsDevice(self): \"\"\"Determines if the file", "self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def IsRoot(self): \"\"\"Determines if the file", "self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_DEVICE def IsDirectory(self): \"\"\"Determines", "path.PathSpec). \"\"\" @property def entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for", "object. Args: file_system: the file system object (instance of vfs.FileSystem). 
path_spec: the path", "def entries(self): \"\"\"The entries (generator of instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator():", "class Directory(object): \"\"\"Class that implements the VFS directory object interface.\"\"\" def __init__(self, file_system,", "pipe.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_PIPE def", "0 # We cannot use len(self._directory.entries) since entries is a generator. return sum(1", "of vfs.FileSystem). path_spec: the path specification object (instance of path.PathSpec). \"\"\" super(Directory, self).__init__()", "file.\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object.type == self._stat_object.TYPE_FILE def", "instance of path.OSPathSpec).\"\"\" for entry in self._EntriesGenerator(): yield entry class FileEntry(object): \"\"\"Class that", "self._file_system def GetLinkedFileEntry(self): \"\"\"Retrieves the linked file entry, e.g. for a symbolic link.\"\"\"", "The default is False. is_virtual: optional boolean value to indicate if the file", "IsDevice(self): \"\"\"Determines if the file entry is a device.\"\"\" if self._stat_object is None:", "= is_root self._is_virtual = is_virtual self._resolver_context = resolver_context self._stat_object = None self.path_spec =", "def __init__(self, file_system, path_spec): \"\"\"Initializes the directory object. Args: file_system: the file system", "vfs.VFSStat).\"\"\" if self._stat_object is None: self._stat_object = self._GetStat() return self._stat_object def IsAllocated(self): \"\"\"Determines", "== name_lower: if not matching_sub_file_entry: matching_sub_file_entry = sub_file_entry return matching_sub_file_entry def GetStat(self): \"\"\"Retrieves", "file entry is a virtual file entry emulated by the corresponding file system." ]
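
The two classes above are an abstract interface; concrete file systems are expected to subclass them. As a rough, hypothetical illustration (not part of dfvfs) of how the entries generator property is meant to be consumed, the sketch below defines a FakeDirectory that yields hard-coded stand-ins instead of real path specifications:

class FakeDirectory(Directory):
  """Hypothetical Directory subclass with hard-coded entries, for illustration."""

  def _EntriesGenerator(self):
    # Stand-ins for path.PathSpec objects; a real implementation would yield
    # path specifications built from the underlying file system.
    for fake_path_spec in ('/a', '/b', '/c'):
      yield fake_path_spec


# Directory.__init__ only stores its arguments, so None works as a placeholder
# file system object in this sketch.
directory = FakeDirectory(None, '/')
print(list(directory.entries))            # ['/a', '/b', '/c']
print(sum(1 for _ in directory.entries))  # 3 -- each access re-runs the generator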
[ "'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal de Saguenay' url = 'http://ville.saguenay.ca'", "= 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal de", "from __future__ import unicode_literals from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature'", "Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil", "CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name", "= 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal de Saguenay' url =", "__future__ import unicode_literals from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id", "from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name", "utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name =", "<filename>ca_qc_saguenay/__init__.py from __future__ import unicode_literals from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification =", "'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal de Saguenay'", "import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay'", "division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal de Saguenay' url", "class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name =", "unicode_literals from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068'", "import unicode_literals from utils import CanadianJurisdiction class Saguenay(CanadianJurisdiction): classification = 'legislature' division_id =", "classification = 'legislature' division_id = 'ocd-division/country:ca/csd:2494068' division_name = 'Saguenay' name = 'Conseil municipal" ]
[ "dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1", "as pd from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool", "pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True)", "= list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 =", "= dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5)", "dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2", "dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if", "dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and dataframe2.empty:", "from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False)", "import pandas as pd from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame,", "False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort()", "dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1", "dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5)", "dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1", "= dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 =", "pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2", "-> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1", "= dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not", "dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 =", "= 
False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values)", "pandas as pd from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals:", "dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if", "list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2,", "if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and dataframe2.empty: return pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values)) return dataframe1.equals(dataframe2)", "def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1 =", "if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty", "= dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and dataframe2.empty: return pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values))", "= dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1)", "dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if", "import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame:", "list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if", "cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) ->", "round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2", "round_decimals: bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1", "bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 =", "dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 =", "dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if", "dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and dataframe2.empty: return 
pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values)) return", "dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort()", "if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals:", "dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 =", "<gh_stars>100-1000 import pandas as pd from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2:", "dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1,", "dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 =", "columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2", "= dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True)", "columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2)", "dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2", "dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty:", "not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not dataframe2.empty:", "not dataframe2.empty: dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and", "pd from cellphonedb.utils import dataframe_format def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool =", "pd.DataFrame: dataframe1 = dataframe1.copy(deep=True) dataframe2 = dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 =", "if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals: dataframe1 = dataframe1.round(5) if not", "= dataframe2.copy(deep=True) columns_names_1 = list(dataframe1.columns.values) columns_names_1.sort() dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1) columns_names_2 = list(dataframe2.columns.values)", 
"columns_names_2 = list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 =", "dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame, round_decimals: bool = False) -> pd.DataFrame: dataframe1 = dataframe1.copy(deep=True)", "= list(dataframe2.columns.values) columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True)", "dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True) if round_decimals: dataframe2 = dataframe2.round(5) if dataframe1.empty and dataframe2.empty: return", "columns_names_2.sort() dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2) if not dataframe1.empty: dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True) if round_decimals:" ]
[ "def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the given payload.\"\"\" topic = \"test/camera\"", "{}, entry) data = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data)", "import async_start from openpeerpower.setup import async_setup_component from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component,", "test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed when entity_id is updated.\"\"\" registry =", ") async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device", "not None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert", "config = { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"],", "\"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None", "= '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state", "discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data = '{", "async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the given payload.\"\"\" topic =", "is not None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")}", "async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of bad discovery message.\"\"\" entry =", "MQTT subscriptions are managed when entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt", "== {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert device.model", "None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer", "0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is", "entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\"", "\"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp)", "{ \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\",", "component.\"\"\" import json from unittest.mock import ANY from openpeerpower.components import camera, mqtt from", "mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component from tests.common import (", "}, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all()) == 1 async def", "{}, entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\":", "\"\") await 
opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp,", "= await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ { \"platform\":", "updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp,", "test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await", "when entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert", "\"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\") assert state is", "def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await", "async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\":", "= mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, {", "id option only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp,", "is None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry", "{(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert device.model ==", "\"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\":", "= opp.states.get(\"camera.milk\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY,", "= await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\",", "== \"Beer\" config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done()", "} ) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert", "from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client):", "set()) assert device is not None assert device.name == \"Beer\" config[\"device\"][\"name\"] = \"Milk\"", "caplog): \"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {},", "assert len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered", "len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered camera.\"\"\"", "import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component from tests.common", "\"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) 
async_fire_mqtt_message(opp, \"test-topic\",", "state = opp.states.get(\"camera.milk\") assert state is not None assert state.name == \"Milk\" state", "= '{ \"name\": \"Beer\" }' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}'", "of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 =", "}, \"unique_id\": \"veryunique\", } data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device", "== \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is", "== \"Beer\" assert device.model == \"Glass\" assert device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp,", "entry) data1 = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' data2 = '{ \"name\":", "per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\":", "\"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data)", "camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\":", "device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.identifiers ==", "registry = await opp.helpers.device_registry.async_get_registry() config = { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\",", "opp.states.get(\"camera.milk\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0,", "mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert", "MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\" }' data2", "\"topic\": topic, \"name\": \"Test Camera\"}}, ) url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client", "state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state", "\"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async def", "is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed when entity_id", "1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device", ") async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the given payload.\"\"\" topic", "await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not None assert state.name ==", "'{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state =", "entry) data1 = '{ \"name\": \"Beer\" }' data2 = '{ 
\"name\": \"Milk\",' '", "\"test-topic\", \"payload\") assert len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal", "is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is", "None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock):", "device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry", "body = await resp.text() assert body == \"beer\" async def test_unique_id(opp): \"\"\"Test unique", "\"Beer\" assert device.model == \"Glass\" assert device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock):", "device.name == \"Beer\" config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await", "device.model == \"Glass\" assert device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device", "camera.DOMAIN, { camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\",", "opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of", "\"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } data =", "\"openpeerpower\", {}, entry) data = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data = '{ \"name\": \"Beer\",'", "None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\" entry =", "assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None)", "\"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, { \"platform\": \"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\",", "device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert device.model == \"Glass\" assert device.sw_version", "async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() config = { \"platform\": \"mqtt\",", "\"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all()) ==", "\"\"\"Test handling of bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {},", "entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await", "topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\":", "\"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\",", "await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": 
\"test-topic\", \"device\":", "json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\":", "0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\" entry", "\"Test Camera\"}}, ) url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client = await aiohttp_client(opp.http.app)", "\"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert", "\"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not", "device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.name ==", "state is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed when", "\"\"\"Test MQTT camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\",", "json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device", "data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert state.name", "= { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\":", "assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer ==", "removal of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data", "}' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await", "not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state =", "entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry()", "async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None", "state is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.beer\") assert state", "async_fire_mqtt_message(opp, topic, \"beer\") client = await aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status", "\"\"\"The tests for mqtt camera component.\"\"\" import json from unittest.mock import ANY from", "\"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not", "device is not None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\",", "await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None state = opp.states.get(\"camera.milk\") assert", "opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\", 
\"name\": \"Test Camera 1\", \"topic\":", "= opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update", "= registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.name == \"Milk\"", "\"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, { \"platform\": \"mqtt\",", "mqtt_mock, caplog): \"\"\"Test handling of bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp,", "{}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [", "\"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } data = json.dumps(config) async_fire_mqtt_message(opp,", "None async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of bad discovery message.\"\"\" entry", "camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry)", "option only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\",", "== 200 body = await resp.text() assert body == \"beer\" async def test_unique_id(opp):", "state = opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp,", "\"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert", "data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2)", "[\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", },", "\"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp,", "\"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' data2 =", "camera component.\"\"\" import json from unittest.mock import ANY from openpeerpower.components import camera, mqtt", "registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\":", "new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None state = opp.states.get(\"camera.milk\")", "None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\")", "assert state is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.beer\") assert", "assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done()", "\"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry)", "given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", 
{\"camera\": {\"platform\":", "\"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\",", "def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await", "client = await aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status == 200 body", "\"Milk\" state = opp.states.get(\"camera.milk\") assert state is None async def test_discovery_broken(opp, mqtt_mock, caplog):", "discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{", "[[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\",", "state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) mock_mqtt.async_subscribe.reset_mock()", "that it fetches the given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component(", "opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client = await aiohttp_client(opp.http.app) resp = await client.get(url) assert", "\"\"\"Test unique id option only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await", "not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp,", "is None state = opp.states.get(\"camera.milk\") assert state is not None assert mock_mqtt.async_subscribe.call_count ==", "\"veryunique\", } data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\",", "not None assert state.name == \"Milk\" state = opp.states.get(\"camera.milk\") assert state is None", "async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test", "async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN)", "opp.helpers.device_registry.async_get_registry() config = { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\":", "from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component", "= MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data = '{ \"name\": \"Beer\",' '", "data2) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert state.name", "state = opp.states.get(\"camera.milk\") assert state is None async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test", "None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not", "\"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, { \"platform\":", "assert device.name == \"Beer\" 
config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data)", "async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None", "async_start(opp, \"openpeerpower\", {}, entry) data = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp,", "\"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\") assert state", "[ { \"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", },", "test_unique_id(opp): \"\"\"Test unique id option only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp)", "\"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\":", "state.name == \"Milk\" state = opp.states.get(\"camera.milk\") assert state is None async def test_discovery_broken(opp,", "opp.states.get(\"camera.beer\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0,", "1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered camera.\"\"\" entry =", "ANY, 0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state", "async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is", "}, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock,", "mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry", "{ \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\":", "== \"Milk\" state = opp.states.get(\"camera.milk\") assert state is None async def test_discovery_broken(opp, mqtt_mock,", "async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the", "\"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\",", "entry) registry = await opp.helpers.device_registry.async_get_registry() config = { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\":", "resp.text() assert body == \"beer\" async def test_unique_id(opp): \"\"\"Test unique id option only", "== 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state =", "data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done()", "assert state is None async def test_discovery_broken(opp, mqtt_mock, 
caplog): \"\"\"Test handling of bad", "= MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\",' '", "not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state =", "\"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert", "test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the given payload.\"\"\" topic = \"test/camera\" await", "== 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered camera.\"\"\" entry", "state = opp.states.get(\"camera.beer\") assert state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state", "{ \"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\":", "\"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\") assert state is not None", "state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state", "mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\",", "= opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client = await aiohttp_client(opp.http.app) resp = await client.get(url)", "async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' data2", "state is None async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of bad discovery", "device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert", "aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status == 200 body = await resp.text()", "MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches", "\"Beer\" config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device", "camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component from tests.common import", "\"helloworld\")}, set()) assert device is not None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert", "handling of bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry)", "[ { \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] },", "openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component from", "== \"Milk\" state = opp.states.get(\"camera.beer\") assert state is None async def test_entity_id_update(opp, mqtt_mock):", "unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\",", "} data = json.dumps(config) 
async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")},", "( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it", "mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp,", "== 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera", "\"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\" }' data2 = '{ \"name\":", "\"Whatever\" assert device.name == \"Beer\" assert device.model == \"Glass\" assert device.sw_version == \"0.1-beta\"", "bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 =", "from unittest.mock import ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start", "] }, ) state = opp.states.get(\"camera.beer\") assert state is not None assert mock_mqtt.async_subscribe.call_count", "None assert state.name == \"Milk\" state = opp.states.get(\"camera.beer\") assert state is None async", "\"Glass\" assert device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\"", "assert device is not None assert device.identifiers == {(\"mqtt\", \"helloworld\")} assert device.connections ==", "managed when entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp)", "entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\",'", "'{ \"name\": \"Beer\" }' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp,", "await async_start(opp, \"openpeerpower\", {}, entry) data = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}'", "only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {", "state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done()", "assert state.name == \"Milk\" state = opp.states.get(\"camera.milk\") assert state is None async def", "\"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\",", "\"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } data = json.dumps(config)", "None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None", "openpeerpower.setup import async_setup_component from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async", "it fetches the given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp,", "is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def", "assert 
device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\"", "client.get(url) assert resp.status == 200 body = await resp.text() assert body == \"beer\"", "\"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\": \"Whatever\", \"name\": \"Beer\",", "is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state", "resp = await client.get(url) assert resp.status == 200 body = await resp.text() assert", "are managed when entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await", "opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": {", "MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() config =", "\"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\")", "\"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } ) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "opp.states.get(\"camera.beer\") assert state is None state = opp.states.get(\"camera.milk\") assert state is not None", "async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN)", "await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\": topic, \"name\": \"Test", "async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\": topic, \"name\": \"Test Camera\"}},", "\"Beer\",' ' \"topic\": \"test_topic\"}' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp,", "'{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state =", "async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None", "payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\",", "await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ { \"platform\": \"mqtt\",", "device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry", "mqtt_mock): \"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {},", "opp.states.get(\"camera.milk\") assert state is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.beer\")", "' \"topic\": \"test_topic\"}' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "opp.states.get(\"camera.beer\") assert 
state is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.milk\")", "{ camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }", "async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test Camera 1\",", "None state = opp.states.get(\"camera.milk\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1", "is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.milk\") assert state is", "aiohttp_client): \"\"\"Test that it fetches the given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp)", "Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, { \"platform\": \"mqtt\", \"name\": \"Test Camera", "state = opp.states.get(\"camera.beer\") assert state is None state = opp.states.get(\"camera.milk\") assert state is", "entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps(", "await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert state.name ==", "from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import async_setup_component from tests.common import ( MockConfigEntry,", "await async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() config = { \"platform\":", "\"veryunique\", } ) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set())", "= opp.states.get(\"camera.beer\") assert state is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions", "assert state is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed", "\"0.1-beta\", }, \"unique_id\": \"veryunique\", } data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done()", "}, \"unique_id\": \"veryunique\", } ) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\",", "state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state", "\"\"\"Test that it fetches the given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await", "= opp.states.get(\"camera.beer\") assert state is None state = opp.states.get(\"camera.milk\") assert state is not", "state is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.milk\") assert state", "\"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not None assert", "tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test", "1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, { \"platform\": \"mqtt\", \"name\": \"Test Camera 2\",", "assert state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await", "None assert device.name == \"Beer\" config[\"device\"][\"name\"] = 
\"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\" }' data2 = '{", "def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed when entity_id is updated.\"\"\" registry", "\"platform\": \"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] },", "= registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.name == \"Beer\"", "await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp, mqtt_mock,", "opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not None assert state.name == \"Milk\"", "assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert", "test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp,", "resp.status == 200 body = await resp.text() assert body == \"beer\" async def", "state = opp.states.get(\"camera.beer\") assert state is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT", "await resp.text() assert body == \"beer\" async def test_unique_id(opp): \"\"\"Test unique id option", "\"camera\", { \"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\",", "} ] }, ) state = opp.states.get(\"camera.beer\") assert state is not None assert", "camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data = '{ \"name\":", "200 body = await resp.text() assert body == \"beer\" async def test_unique_id(opp): \"\"\"Test", "async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not None", "\"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } ) async_fire_mqtt_message(opp,", "opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.name", "of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data =", "mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN:", "def test_unique_id(opp): \"\"\"Test unique id option only creates one camera per unique_id.\"\"\" await", "the given payload.\"\"\" topic = \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\":", "assert device is not None assert device.name == \"Beer\" config[\"device\"][\"name\"] = \"Milk\" data", "ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT camera device registry integration.\"\"\"", "\"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all()) == 1 async", "opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Beer\"", "assert mock_mqtt.async_subscribe.call_count == 1 
mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test", "from openpeerpower.setup import async_setup_component from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, )", "is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state", "data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert state is not None assert state.name", "topic, \"beer\") client = await aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status ==", "await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\", \"name\":", "\"beer\" async def test_unique_id(opp): \"\"\"Test unique id option only creates one camera per", "opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\")", "\"TOTALLY_UNIQUE\", }, { \"platform\": \"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\",", "\"payload\") assert len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of", "message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\":", "== \"beer\" async def test_unique_id(opp): \"\"\"Test unique id option only creates one camera", "config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device =", "registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN,", "await async_setup_component( opp, \"camera\", { \"camera\": [ { \"platform\": \"mqtt\", \"name\": \"Test Camera", "{ \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] }, )", "update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry) data1", "\"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\") assert state is not", "subscriptions are managed when entity_id is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt =", "\"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", \"\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None", "import json from unittest.mock import ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery", "import async_setup_component from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def", "await async_start(opp, \"openpeerpower\", {}, entry) data1 = '{ \"name\": \"Beer\" }' data2 =", "data1 = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' data2 = '{ \"name\": \"Milk\",'", "'{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' data2 = '{ \"name\": \"Milk\",' ' \"topic\":", "\"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all())", "fetches the given payload.\"\"\" topic = \"test/camera\" 
await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\",", "\"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert", "import ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup", "ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from openpeerpower.setup import", "await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await", "Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\")", "{ \"platform\": \"mqtt\", \"name\": \"Test Camera 1\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, {", "camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\": [ {", "== {(\"mqtt\", \"helloworld\")} assert device.connections == {(\"mac\", \"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert", "async_setup_component from tests.common import ( MockConfigEntry, async_fire_mqtt_message, async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp,", "}, { \"platform\": \"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", },", "state.name == \"Milk\" state = opp.states.get(\"camera.beer\") assert state is None async def test_entity_id_update(opp,", "is not None assert state.name == \"Milk\" state = opp.states.get(\"camera.beer\") assert state is", "1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) mock_mqtt.async_subscribe.reset_mock() registry.async_update_entity(\"camera.beer\", new_entity_id=\"camera.milk\") await opp.async_block_till_done() state = opp.states.get(\"camera.beer\")", "\"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\") assert", "= \"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\": topic,", "test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await", "\"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\": [\"helloworld\"], \"connections\": [[\"mac\", \"02:5b:26:a8:dc:12\"]], \"manufacturer\":", "integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {}, entry) registry = await", "\"manufacturer\": \"Whatever\", \"name\": \"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } data", "is None async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of bad discovery message.\"\"\"", "async_mock_mqtt_component, mock_registry, ) async def test_run_camera_setup(opp, aiohttp_client): \"\"\"Test that it fetches the given", "}, ) state = opp.states.get(\"camera.beer\") assert state is not None assert mock_mqtt.async_subscribe.call_count ==", "assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert device.model == \"Glass\" assert", "\"Milk\" data = 
json.dumps(config) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() device = registry.async_get_device({(\"mqtt\", \"helloworld\")},", "assert device.name == \"Beer\" assert device.model == \"Glass\" assert device.sw_version == \"0.1-beta\" async", "await client.get(url) assert resp.status == 200 body = await resp.text() assert body ==", "{ \"platform\": \"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ]", "test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\",", "async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await", "async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert len(opp.states.async_all()) == 1 async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test", "= await aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status == 200 body =", "None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are managed when entity_id is", "opp.states.get(\"camera.milk\") assert state is None async def test_discovery_broken(opp, mqtt_mock, caplog): \"\"\"Test handling of", "assert state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\") assert", "is not None assert device.name == \"Beer\" config[\"device\"][\"name\"] = \"Milk\" data = json.dumps(config)", "\"\"\"Test update of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\", {}, entry)", "\"Beer\" }' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1)", "= opp.states.get(\"camera.beer\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY,", "\"mqtt\", \"name\": \"Test Camera 2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, )", "\"Beer\", \"model\": \"Glass\", \"sw_version\": \"0.1-beta\", }, \"unique_id\": \"veryunique\", } ) async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data)", "await aiohttp_client(opp.http.app) resp = await client.get(url) assert resp.status == 200 body = await", "opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp, mqtt_mock, caplog):", "MQTT camera device registry integration.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp, \"openpeerpower\", {},", "registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.name == \"Beer\" config[\"device\"][\"name\"]", "{\"platform\": \"mqtt\", \"topic\": topic, \"name\": \"Test Camera\"}}, ) url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic,", "\"02:5b:26:a8:dc:12\")} assert device.manufacturer == \"Whatever\" assert device.name == \"Beer\" assert device.model == \"Glass\"", "unique id option only creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component(", "\"name\": \"beer\", \"topic\": \"test-topic\", 
\"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state = opp.states.get(\"camera.beer\")", "await async_start(opp, \"openpeerpower\", {}, entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( {", "mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async def test_entity_device_info_with_identifier(opp, mqtt_mock): \"\"\"Test MQTT", "registry.async_get_device({(\"mqtt\", \"helloworld\")}, set()) assert device is not None assert device.identifiers == {(\"mqtt\", \"helloworld\")}", "\"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not", "async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\":", "def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) entry.add_to_opp(opp) await async_start(opp,", "\"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert", "unittest.mock import ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import async_start from", "assert device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry", "= opp.states.get(\"camera.milk\") assert state is not None assert state.name == \"Milk\" state =", "= await resp.text() assert body == \"beer\" async def test_unique_id(opp): \"\"\"Test unique id", "state = opp.states.get(\"camera.beer\") assert state is None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test", "= opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "\"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ] }, ) state =", "device.sw_version == \"0.1-beta\" async def test_entity_device_info_update(opp, mqtt_mock): \"\"\"Test device registry update.\"\"\" entry =", "async def test_discovery_removal_camera(opp, mqtt_mock, caplog): \"\"\"Test removal of discovered camera.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN)", "opp.states.get(\"camera.beer\") assert state is None async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2) await opp.async_block_till_done() state = opp.states.get(\"camera.milk\")", "await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\":", "entry) registry = await opp.helpers.device_registry.async_get_registry() data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\",", "\"name\": \"Test Camera\"}}, ) url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client = await", "\"name\": \"Beer\" }' data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\",", "camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\": \"beer\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", } ]", "caplog): \"\"\"Test handling of bad discovery message.\"\"\" entry = MockConfigEntry(domain=mqtt.DOMAIN) await async_start(opp, \"openpeerpower\",", "\"test_topic\"}' 
data2 = '{ \"name\": \"Milk\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await", "data = json.dumps( { \"platform\": \"mqtt\", \"name\": \"<NAME>\", \"topic\": \"test-topic\", \"device\": { \"identifiers\":", "topic, \"name\": \"Test Camera\"}}, ) url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client =", "opp.states.get(\"camera.beer\") assert state is None async def test_entity_id_update(opp, mqtt_mock): \"\"\"Test MQTT subscriptions are", "body == \"beer\" async def test_unique_id(opp): \"\"\"Test unique id option only creates one", "assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ { \"platform\": \"mqtt\", \"name\": \"beer\",", "state is not None assert mock_mqtt.async_subscribe.call_count == 1 mock_mqtt.async_subscribe.assert_any_call(\"test-topic\", ANY, 0, None) async", "await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\": topic, \"name\": \"Test Camera\"}}, )", "opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Milk\"", "json from unittest.mock import ANY from openpeerpower.components import camera, mqtt from openpeerpower.components.mqtt.discovery import", "creates one camera per unique_id.\"\"\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", { \"camera\":", "is updated.\"\"\" registry = mock_registry(opp, {}) mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component(", "assert resp.status == 200 body = await resp.text() assert body == \"beer\" async", "\"test/camera\" await async_mock_mqtt_component(opp) await async_setup_component( opp, \"camera\", {\"camera\": {\"platform\": \"mqtt\", \"topic\": topic, \"name\":", "data = '{ \"name\": \"Beer\",' ' \"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data) await opp.async_block_till_done()", "mock_mqtt = await async_mock_mqtt_component(opp) assert await async_setup_component( opp, camera.DOMAIN, { camera.DOMAIN: [ {", "state = opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Milk\" state", "None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\" entry =", "opp.states.get(\"camera.beer\") assert state is not None assert state.name == \"Beer\" async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data2)", "== \"Whatever\" assert device.name == \"Beer\" assert device.model == \"Glass\" assert device.sw_version ==", "2\", \"topic\": \"test-topic\", \"unique_id\": \"TOTALLY_UNIQUE\", }, ] }, ) async_fire_mqtt_message(opp, \"test-topic\", \"payload\") assert", "= await client.get(url) assert resp.status == 200 body = await resp.text() assert body", "url = opp.states.get(\"camera.test_camera\").attributes[\"entity_picture\"] async_fire_mqtt_message(opp, topic, \"beer\") client = await aiohttp_client(opp.http.app) resp = await", "state is None async def test_discovery_update_camera(opp, mqtt_mock, caplog): \"\"\"Test update of discovered camera.\"\"\"", "\"topic\": \"test_topic\"}' async_fire_mqtt_message(opp, \"openpeerpower/camera/bla/config\", data1) await opp.async_block_till_done() state = opp.states.get(\"camera.beer\") assert state is", "state = opp.states.get(\"camera.beer\") assert state is not None assert mock_mqtt.async_subscribe.call_count == 1 
<reponame>pcaston/Open-Peer-Power
"""The tests for mqtt camera component."""
import json
from unittest.mock import ANY

from openpeerpower.components import camera, mqtt
from openpeerpower.components.mqtt.discovery import async_start
from openpeerpower.setup import async_setup_component

from tests.common import (
    MockConfigEntry,
    async_fire_mqtt_message,
    async_mock_mqtt_component,
    mock_registry,
)


async def test_run_camera_setup(opp, aiohttp_client):
    """Test that it fetches the given payload."""
    topic = "test/camera"
    await async_mock_mqtt_component(opp)
    await async_setup_component(
        opp,
        "camera",
        {"camera": {"platform": "mqtt", "topic": topic, "name": "Test Camera"}},
    )

    url = opp.states.get("camera.test_camera").attributes["entity_picture"]

    async_fire_mqtt_message(opp, topic, "beer")

    client = await aiohttp_client(opp.http.app)
    resp = await client.get(url)
    assert resp.status == 200
    body = await resp.text()
    assert body == "beer"


async def test_unique_id(opp):
    """Test unique id option only creates one camera per unique_id."""
    await async_mock_mqtt_component(opp)
    await async_setup_component(
        opp,
        "camera",
        {
            "camera": [
                {
                    "platform": "mqtt",
                    "name": "Test Camera 1",
                    "topic": "test-topic",
                    "unique_id": "TOTALLY_UNIQUE",
                },
                {
                    "platform": "mqtt",
                    "name": "Test Camera 2",
                    "topic": "test-topic",
                    "unique_id": "TOTALLY_UNIQUE",
                },
            ]
        },
    )
    async_fire_mqtt_message(opp, "test-topic", "payload")
    assert len(opp.states.async_all()) == 1


async def test_discovery_removal_camera(opp, mqtt_mock, caplog):
    """Test removal of discovered camera."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(opp, "openpeerpower", {}, entry)
    data = '{ "name": "Beer",' '  "topic": "test_topic"}'

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is not None
    assert state.name == "Beer"

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", "")
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is None


async def test_discovery_update_camera(opp, mqtt_mock, caplog):
    """Test update of discovered camera."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(opp, "openpeerpower", {}, entry)
    data1 = '{ "name": "Beer",' '  "topic": "test_topic"}'
    data2 = '{ "name": "Milk",' '  "topic": "test_topic"}'

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data1)
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is not None
    assert state.name == "Beer"

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data2)
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is not None
    assert state.name == "Milk"
    state = opp.states.get("camera.milk")
    assert state is None


async def test_discovery_broken(opp, mqtt_mock, caplog):
    """Test handling of bad discovery message."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    await async_start(opp, "openpeerpower", {}, entry)
    data1 = '{ "name": "Beer" }'
    data2 = '{ "name": "Milk",' '  "topic": "test_topic"}'

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data1)
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is None

    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data2)
    await opp.async_block_till_done()

    state = opp.states.get("camera.milk")
    assert state is not None
    assert state.name == "Milk"
    state = opp.states.get("camera.beer")
    assert state is None


async def test_entity_id_update(opp, mqtt_mock):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    registry = mock_registry(opp, {})
    mock_mqtt = await async_mock_mqtt_component(opp)
    assert await async_setup_component(
        opp,
        camera.DOMAIN,
        {
            camera.DOMAIN: [
                {
                    "platform": "mqtt",
                    "name": "beer",
                    "topic": "test-topic",
                    "unique_id": "TOTALLY_UNIQUE",
                }
            ]
        },
    )

    state = opp.states.get("camera.beer")
    assert state is not None
    assert mock_mqtt.async_subscribe.call_count == 1
    mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, None)
    mock_mqtt.async_subscribe.reset_mock()

    registry.async_update_entity("camera.beer", new_entity_id="camera.milk")
    await opp.async_block_till_done()

    state = opp.states.get("camera.beer")
    assert state is None

    state = opp.states.get("camera.milk")
    assert state is not None
    assert mock_mqtt.async_subscribe.call_count == 1
    mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, None)


async def test_entity_device_info_with_identifier(opp, mqtt_mock):
    """Test MQTT camera device registry integration."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    entry.add_to_opp(opp)
    await async_start(opp, "openpeerpower", {}, entry)
    registry = await opp.helpers.device_registry.async_get_registry()

    data = json.dumps(
        {
            "platform": "mqtt",
            "name": "<NAME>",
            "topic": "test-topic",
            "device": {
                "identifiers": ["helloworld"],
                "connections": [["mac", "02:5b:26:a8:dc:12"]],
                "manufacturer": "Whatever",
                "name": "Beer",
                "model": "Glass",
                "sw_version": "0.1-beta",
            },
            "unique_id": "veryunique",
        }
    )
    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
    await opp.async_block_till_done()

    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.identifiers == {("mqtt", "helloworld")}
    assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
    assert device.manufacturer == "Whatever"
    assert device.name == "Beer"
    assert device.model == "Glass"
    assert device.sw_version == "0.1-beta"


async def test_entity_device_info_update(opp, mqtt_mock):
    """Test device registry update."""
    entry = MockConfigEntry(domain=mqtt.DOMAIN)
    entry.add_to_opp(opp)
    await async_start(opp, "openpeerpower", {}, entry)
    registry = await opp.helpers.device_registry.async_get_registry()

    config = {
        "platform": "mqtt",
        "name": "<NAME>",
        "topic": "test-topic",
        "device": {
            "identifiers": ["helloworld"],
            "connections": [["mac", "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
        "unique_id": "veryunique",
    }

    data = json.dumps(config)
    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
    await opp.async_block_till_done()

    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.name == "Beer"

    config["device"]["name"] = "Milk"
    data = json.dumps(config)
    async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
    await opp.async_block_till_done()

    device = registry.async_get_device({("mqtt", "helloworld")}, set())
    assert device is not None
    assert device.name == "Milk"
[ "= 'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback for {} failed to", "isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True", "be raised if any of the child alarms are raised ''' return any(alarms.items())", "Adds meta alarms to the alarm server Meta alarms are special in that", "Sets or updates the alarm Updating the alarm triggers all of the alarms", "~handler_module param to the alarm server. ''' # If the param exists, load", "exists or a blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name,", "it is the initial state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return", "# If it is a boolean, only update if it changes the raised", "def _on_get_alarm(self, srv): ''' Either returns the alarm request if it exists or", "of the child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def", "meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for an alarm handler when one", "self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\": rospy.init_node(\"alarm_server\") a = AlarmServer() rospy.spin()", "of the alarm_server so that users know it is the initial state '''", "want to # interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm)", "__import__(handler_module, fromlist=[\"\"]) for handler in [cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls)", "...} Users can also provide more complex triggering mechanisms by providing an alarm", "{}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside", "srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either returns the alarm request", "alarm): ''' Sets or updates the alarm Updating the alarm triggers all of", "for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\")", "meta, alarms in meta_alarms_dict.iteritems(): # Add the meta alarm if meta not in", "a the structure of a meta alarm. 
It has the following structure: {meta_alarm_name", "Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm import", "Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes imported", "child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta,", "def make_tagged_alarm(self, name): ''' Makes a blank alarm with the node_name of the", "is provided, then the meta-alarm will be raised if any of the child", "blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self,", "meta alarm, we need to save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name]", "return True def _on_get_alarm(self, srv): ''' Either returns the alarm request if it", "[list of child alarm names], ...} Users can also provide more complex triggering", "triggering mechanisms by providing an alarm handler class with a 'meta_predicate' method. '''", "alarm server. ''' # If the param exists, load it here handler_module =", "#!/usr/bin/env python import rospy from ros_alarms import HandlerBase from ros_alarms.msg import Alarm as", ": [list of child alarm names], ...} Users can also provide more complex", "alarm request if it exists or a blank alarm ''' rospy.logdebug(\"Got request for", "creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] =", "of state of their child alarms. The /meta_alarms parameter defines a the structure", "alarm if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): '''", "the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms()", "''' alarms = {name: alarm for name, alarm in self.alarms.items() if name in", "change of state of their child alarms. The /meta_alarms parameter defines a the", "instead triggered by a change of state of their child alarms. The /meta_alarms", "boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes imported by the", "changed. Then, updates the status of the parent alarm, if nessesary. ''' alarms", "this should return either an alarm object or a boolean for if should", "# Maps alarm name to Alarm objects self.alarms = {} # Handler classes", "self._create_alarm_handlers() # Outside interface to the alarm system. Usually you don't want to", "imported by the alarm server and run code upon a change of state", "callback for an alarm handler when one of its metal alarms has changed.", "'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even", "method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): # Add", "= {name: alarm for name, alarm in self.alarms.items() if name in sub_alarms} meta", "also provide more complex triggering mechanisms by providing an alarm handler class with", "_on_get_alarm(self, srv): ''' Either returns the alarm request if it exists or a", "an alarm handler class with a 'meta_predicate' method. 
''' meta_alarms_dict = rospy.get_param(namespace, {})", "should be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance", "initial state if necessary (could have already been added while creating metas) if", "the module where the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler", "has changed. Then, updates the status of the parent alarm, if nessesary. '''", "else: rospy.logwarn('Meta alarm callback for {} failed to return an Alarm or boolean'.format(meta_alarm))", "rospy from ros_alarms import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv", "you don't want to # interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\",", "# interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self,", "Either returns the alarm request if it exists or a blank alarm '''", "Check the predicate, this should return either an alarm object or a boolean", "in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even if already added", "'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback for {} failed to return", "HandlerBase._init(self) # Import the module where the handlers are stored alarm_handlers = __import__(handler_module,", "Calls the meta_predicate callback for an alarm handler when one of its metal", "alarm.alarm_name = meta_alarm # Ensure alarm name is correct elif type(res) == bool:", "then the meta-alarm will be raised if any of the child alarms are", "def set_alarm(self, alarm): ''' Sets or updates the alarm Updating the alarm triggers", "description alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback for {}", "most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100)", "is if isinstance(res, Alarm): alarm = res alarm.alarm_name = meta_alarm # Ensure alarm", "alarm.raised: # If it is raised, set problem description alarm.problem_description = 'Raised by", "# Ensure alarm name is correct elif type(res) == bool: # If it", "self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank alarm with the node_name", "the param exists, load it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is", "HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from", "for an alarm handler when one of its metal alarms has changed. 
Then,", "is correct elif type(res) == bool: # If it is a boolean, only", "names], ...} Users can also provide more complex triggering mechanisms by providing an", "Update even if already added to server elif alarm_name not in self.alarms: #", "if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\":", "the node_name of the alarm_server so that users know it is the initial", "self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even if already added to", "load it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None: return #", "or a blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp()", "alarm names to predicate Handler functions self.meta_alarms = {} msg = \"Expecting at", "# Update even if already added to server elif alarm_name not in self.alarms:", "inspect class AlarmServer(object): def __init__(self): # Maps alarm name to Alarm objects self.alarms", "''' Makes a blank alarm with the node_name of the alarm_server so that", "''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms):", "alarm handler class with a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for", "ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm import inspect class AlarmServer(object): def", "self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self,", "interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm):", "it changes the raised status raised_status = res if raised_status == meta.raised: return", "if already added to server elif alarm_name not in self.alarms: # Add default", "server elif alarm_name not in self.alarms: # Add default initial if not there", "alarms has changed. Then, updates the status of the parent alarm, if nessesary.", "parameter defines a the structure of a meta alarm. It has the following", "_create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm server Meta alarms are", "to # interface with these directly. 
rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def", "are special in that they are not directly raised or cleared but are", "meta_alarms_dict.iteritems(): # Add the meta alarm if meta not in self.alarms: self.alarms[meta] =", "self.make_tagged_alarm(alarm_name) else: pass # If a handler exists for a meta alarm, we", "in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is", "the class exists exists h = handler() alarm_name = handler.alarm_name # Set initial", "cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name", "handlers access to alarm server HandlerBase._init(self) # Import the module where the handlers", "h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the", "raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: #", "has the following structure: {meta_alarm_name : [list of child alarm names], ...} Users", "in meta_alarms_dict.iteritems(): # Add the meta alarm if meta not in self.alarms: self.alarms[meta]", "to predicate Handler functions self.meta_alarms = {} msg = \"Expecting at most the", "specified with the ~handler_module param to the alarm server. ''' # If the", "so that users know it is the initial state ''' alarm = Alarm.blank(name)", "\"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg,", "request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a", "= {} # Maps meta alarm names to predicate Handler functions self.meta_alarms =", "alarm server and run code upon a change of state of their respective", "an __init__.py) and in the python path. They will be loaded from the", "child alarm names], ...} Users can also provide more complex triggering mechanisms by", "with the ~handler_module param to the alarm server. ''' # If the param", "queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm system. 
Usually you don't", "meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If it", "# If a handler exists for a meta alarm, we need to save", "# Handler classes for overwriting default alarm functionality self.handlers = {} # Maps", "is not \"HandlerBase\"]: # Have to instantiate so the class exists exists h", "# If it an alarm instance send it out as is if isinstance(res,", "the alarm_server so that users know it is the initial state ''' alarm", "all of the alarms callbacks ''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm)", "= 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback", "predicate, this should return either an alarm object or a boolean for if", "Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the", "meta alarm if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms):", "\"HandlerBase\"]: # Have to instantiate so the class exists exists h = handler()", "alarm for name, alarm in self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm]", "is None: return # Give handlers access to alarm server HandlerBase._init(self) # Import", "''' Sets or updates the alarm Updating the alarm triggers all of the", "self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return", "if name in sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate, this should", "will be raised if any of the child alarms are raised ''' return", "srv): ''' Either returns the alarm request if it exists or a blank", "returns the alarm request if it exists or a blank alarm ''' rospy.logdebug(\"Got", "'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): #", "''' # If the param exists, load it here handler_module = rospy.get_param(\"~handler_module\", None)", "are instead triggered by a change of state of their child alarms. The", "for a meta alarm, we need to save the predicate if alarm_name in", "return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm):", "(could have already been added while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name", "if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False", "AlarmServer(object): def __init__(self): # Maps alarm name to Alarm objects self.alarms = {}", "HandlerBase) and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: # Have to instantiate", "{} # Maps meta alarm names to predicate Handler functions self.meta_alarms = {}", "by meta alarm' else: rospy.logwarn('Meta alarm callback for {} failed to return an", "parent alarm, if nessesary. ''' alarms = {name: alarm for name, alarm in", "class exists exists h = handler() alarm_name = handler.alarm_name # Set initial state", "system. Usually you don't want to # interface with these directly. 
rospy.Service(\"/alarm/set\", AlarmSet,", "'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for", "alarm_name = handler.alarm_name # Set initial state if necessary (could have already been", "msg = \"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub =", "state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm,", "if res is False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name]", "a blank alarm with the node_name of the alarm_server so that users know", "the ~handler_module param to the alarm server. ''' # If the param exists,", "interface to the alarm system. Usually you don't want to # interface with", "alarm system. Usually you don't want to # interface with these directly. rospy.Service(\"/alarm/set\",", "self.alarms: # Add default initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else:", "overwriting default alarm functionality self.handlers = {} # Maps meta alarm names to", "self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates the alarm", "callbacks ''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False:", "state of their child alarms. The /meta_alarms parameter defines a the structure of", "they are not directly raised or cleared but are instead triggered by a", "a blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def", "no predicate for a meta-alarm is provided, then the meta-alarm will be raised", "its metal alarms has changed. Then, updates the status of the parent alarm,", "failed to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm", "it is a boolean, only update if it changes the raised status raised_status", "raised or cleared but are instead triggered by a change of state of", "here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None: return # Give handlers", "Makes a blank alarm with the node_name of the alarm_server so that users", "self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if alarm.alarm_name in", "to the alarm system. 
Usually you don't want to # interface with these", "= {} # Handler classes for overwriting default alarm functionality self.handlers = {}", "False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm", "res is False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] =", "server Meta alarms are special in that they are not directly raised or", "to instantiate so the class exists exists h = handler() alarm_name = handler.alarm_name", "not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate", "Alarm handlers are classes imported by the alarm server and run code upon", "if any of the child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] =", "an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes", "alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank alarm", "if handler_module is None: return # Give handlers access to alarm server HandlerBase._init(self)", "Handlers should be in a python module (directory with an __init__.py) and in", "the following structure: {meta_alarm_name : [list of child alarm names], ...} Users can", "to Alarm objects self.alarms = {} # Handler classes for overwriting default alarm", "for handler in [cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls,", "boolean for if should be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it", "# Outside interface to the alarm system. Usually you don't want to #", "hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: # Have to instantiate so the", "alarm object or a boolean for if should be raised res = self.meta_alarms[meta_alarm](meta,", "# Check the predicate, this should return either an alarm object or a", "complex triggering mechanisms by providing an alarm handler class with a 'meta_predicate' method.", "not directly raised or cleared but are instead triggered by a change of", "providing an alarm handler class with a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace,", "alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg()", "self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate for a", "= Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls", "self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): '''", "child alarms. 
The /meta_alarms parameter defines a the structure of a meta alarm.", "handler in [cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase)", "at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True,", "namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm server Meta alarms are special", "alarm name to Alarm objects self.alarms = {} # Handler classes for overwriting", "alarms. The /meta_alarms parameter defines a the structure of a meta alarm. It", "alarm functionality self.handlers = {} # Maps meta alarm names to predicate Handler", "rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm", "structure of a meta alarm. It has the following structure: {meta_alarm_name : [list", "alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): '''", "python module (directory with an __init__.py) and in the python path. They will", "are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls for name, cls", "import rospy from ros_alarms import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from", "it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None: return # Give", "alarm.raised = bool(raised_status) if alarm.raised: # If it is raised, set problem description", "handler class with a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta,", "return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for", "in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate for", "def _create_alarm_handlers(self): ''' Alarm handlers are classes imported by the alarm server and", "an alarm object or a boolean for if should be raised res =", "to the alarm server Meta alarms are special in that they are not", "following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers()", "self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True", "alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If it is raised,", "callback for {} failed to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def", "only update if it changes the raised status raised_status = res if raised_status", "Give handlers access to alarm server HandlerBase._init(self) # Import the module where the", "already added to server elif alarm_name not in self.alarms: # Add default initial", "default initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If", "{name: alarm for name, alarm in self.alarms.items() if name in sub_alarms} meta =", "# Add the meta alarm if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta)", "path. 
They will be loaded from the module specified with the ~handler_module param", "python import rospy from ros_alarms import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg", "alarms callbacks ''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is", "class AlarmServer(object): def __init__(self): # Maps alarm name to Alarm objects self.alarms =", "alarms): ''' If no predicate for a meta-alarm is provided, then the meta-alarm", "meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): # Add the meta", "have already been added while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in", "correct elif type(res) == bool: # If it is a boolean, only update", "boolean, only update if it changes the raised status raised_status = res if", "instance send it out as is if isinstance(res, Alarm): alarm = res alarm.alarm_name", "raised status raised_status = res if raised_status == meta.raised: return alarm = meta.as_msg()", "alarm callback for {} failed to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm)", "set problem description alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback", "Usually you don't want to # interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm)", "run code upon a change of state of their respective alarms. Handlers should", "stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls for name, cls in", "Add the meta alarm if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def", "def __init__(self): # Maps alarm name to Alarm objects self.alarms = {} #", "be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance send", "exists for a meta alarm, we need to save the predicate if alarm_name", "their child alarms. The /meta_alarms parameter defines a the structure of a meta", "if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return", "the alarm server Meta alarms are special in that they are not directly", "name in sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate, this should return", "the raised status raised_status = res if raised_status == meta.raised: return alarm =", "been added while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm)", "if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a handler", "self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either returns the alarm request if", "one of its metal alarms has changed. Then, updates the status of the", "for overwriting default alarm functionality self.handlers = {} # Maps meta alarm names", "metal alarms has changed. 
Then, updates the status of the parent alarm, if", "already been added while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms:", "more complex triggering mechanisms by providing an alarm handler class with a 'meta_predicate'", "for alarm in alarms: if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb)", "the predicate, this should return either an alarm object or a boolean for", "self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"):", "not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\": rospy.init_node(\"alarm_server\") a", "names to predicate Handler functions self.meta_alarms = {} msg = \"Expecting at most", "a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems():", "alarm_server so that users know it is the initial state ''' alarm =", "objects self.alarms = {} # Handler classes for overwriting default alarm functionality self.handlers", "<reponame>naveenmaan/mil #!/usr/bin/env python import rospy from ros_alarms import HandlerBase from ros_alarms.msg import Alarm", "= res alarm.alarm_name = meta_alarm # Ensure alarm name is correct elif type(res)", "handler() alarm_name = handler.alarm_name # Set initial state if necessary (could have already", "meta_predicate callback for an alarm handler when one of its metal alarms has", "If the param exists, load it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module", "self.alarms[alarm_name] = h.initial_alarm # Update even if already added to server elif alarm_name", "module specified with the ~handler_module param to the alarm server. ''' # If", "alarm Updating the alarm triggers all of the alarms callbacks ''' if alarm.alarm_name", "ros_alarms import Alarm import inspect class AlarmServer(object): def __init__(self): # Maps alarm name", "alarm instance send it out as is if isinstance(res, Alarm): alarm = res", "self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm not in self.alarms: self.alarms[alarm] =", "as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm import inspect", "cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm not", "alarm = res alarm.alarm_name = meta_alarm # Ensure alarm name is correct elif", "module where the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in", "= handler.alarm_name # Set initial state if necessary (could have already been added", "return # Give handlers access to alarm server HandlerBase._init(self) # Import the module", "the child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm,", "of their respective alarms. Handlers should be in a python module (directory with", "the module specified with the ~handler_module param to the alarm server. 
''' #", "problem description alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback for", "even if already added to server elif alarm_name not in self.alarms: # Add", "name, alarm in self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm] # Check", "= h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds", "def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either returns the", "{}) for meta, alarms in meta_alarms_dict.iteritems(): # Add the meta alarm if meta", "ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import", "access to alarm server HandlerBase._init(self) # Import the module where the handlers are", "= h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to", "sub_alarms): ''' Calls the meta_predicate callback for an alarm handler when one of", "and run code upon a change of state of their respective alarms. Handlers", "name): ''' Makes a blank alarm with the node_name of the alarm_server so", "return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for an", "are classes imported by the alarm server and run code upon a change", "alarm server HandlerBase._init(self) # Import the module where the handlers are stored alarm_handlers", "and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: # Have to instantiate so", "Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank alarm with the node_name of", "__init__(self): # Maps alarm name to Alarm objects self.alarms = {} # Handler", "from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms", "be loaded from the module specified with the ~handler_module param to the alarm", "or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes imported by", "in alarms: if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__", "changes the raised status raised_status = res if raised_status == meta.raised: return alarm", "an alarm handler when one of its metal alarms has changed. Then, updates", "when one of its metal alarms has changed. Then, updates the status of", "self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else:", "or a boolean for if should be raised res = self.meta_alarms[meta_alarm](meta, alarms) #", "if it exists or a blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name))", "the alarms callbacks ''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res", "to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers", "already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a handler exists for a", "the alarm server. 
''' # If the param exists, load it here handler_module", "import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet", "a change of state of their respective alarms. Handlers should be in a", "with the node_name of the alarm_server so that users know it is the", "and name is not \"HandlerBase\"]: # Have to instantiate so the class exists", "{} # Handler classes for overwriting default alarm functionality self.handlers = {} #", "import Alarm import inspect class AlarmServer(object): def __init__(self): # Maps alarm name to", "self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance send it out as is", "if necessary (could have already been added while creating metas) if hasattr(h, 'initial_alarm'):", "AlarmSet from ros_alarms import Alarm import inspect class AlarmServer(object): def __init__(self): # Maps", "handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None: return # Give handlers access", "== meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If", "Meta alarms are special in that they are not directly raised or cleared", "{meta_alarm_name : [list of child alarm names], ...} Users can also provide more", "alarm, we need to save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] =", "directly raised or cleared but are instead triggered by a change of state", "return alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If it is", "in a python module (directory with an __init__.py) and in the python path.", "we need to save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate", "AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm import inspect class", "added while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else:", "rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates", "the initial state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def", "alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for an alarm", "in sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate, this should return either", "''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms)", "if raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised:", "make_tagged_alarm(self, name): ''' Makes a blank alarm with the node_name of the alarm_server", "h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta", "{}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm server Meta", "None: return # Give handlers access to alarm server HandlerBase._init(self) # Import the", "added to server elif alarm_name not in self.alarms: # Add default initial if", "alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv):", "isinstance(res, Alarm): alarm = res 
alarm.alarm_name = meta_alarm # Ensure alarm name is", "rospy.get_param(\"~handler_module\", None) if handler_module is None: return # Give handlers access to alarm", "res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if alarm.alarm_name in self.alarms:", "''' Either returns the alarm request if it exists or a blank alarm", "predicate Handler functions self.meta_alarms = {} msg = \"Expecting at most the following", "in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if alarm.alarm_name", "of their child alarms. The /meta_alarms parameter defines a the structure of a", "metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm", "self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate, this", "alarm names], ...} Users can also provide more complex triggering mechanisms by providing", "== bool: # If it is a boolean, only update if it changes", "from ros_alarms import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import", "a change of state of their child alarms. The /meta_alarms parameter defines a", "meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If it is raised, set problem", "alarm server Meta alarms are special in that they are not directly raised", "in the python path. They will be loaded from the module specified with", "class with a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms", "alarm, if nessesary. ''' alarms = {name: alarm for name, alarm in self.alarms.items()", "alarms. Handlers should be in a python module (directory with an __init__.py) and", "# Import the module where the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"])", "state if necessary (could have already been added while creating metas) if hasattr(h,", "name is not \"HandlerBase\"]: # Have to instantiate so the class exists exists", "can also provide more complex triggering mechanisms by providing an alarm handler class", "= default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms:", "if alarm.raised: # If it is raised, set problem description alarm.problem_description = 'Raised", "instantiate so the class exists exists h = handler() alarm_name = handler.alarm_name #", "Updating the alarm triggers all of the alarms callbacks ''' if alarm.alarm_name in", "res if raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status) if", "the status of the parent alarm, if nessesary. ''' alarms = {name: alarm", "any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm", "node_name of the alarm_server so that users know it is the initial state", "in self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate,", "functions self.meta_alarms = {} msg = \"Expecting at most the following alarms: {}\"", "directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or", "the parent alarm, if nessesary. 
''' alarms = {name: alarm for name, alarm", "res = self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance send it out", "self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a handler exists for a meta", "structure: {meta_alarm_name : [list of child alarm names], ...} Users can also provide", "self.alarms = {} # Handler classes for overwriting default alarm functionality self.handlers =", "initial state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self,", "/meta_alarms parameter defines a the structure of a meta alarm. It has the", "alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if", "functionality self.handlers = {} # Maps meta alarm names to predicate Handler functions", "from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm import inspect class AlarmServer(object):", "if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If", "handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm server", "raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name,", "where the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls", "Handler functions self.meta_alarms = {} msg = \"Expecting at most the following alarms:", "bool: # If it is a boolean, only update if it changes the", "{}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank alarm with", "handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls for name,", "import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet, AlarmSet from ros_alarms import Alarm", "return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm not in self.alarms: self.alarms[alarm]", "''' If no predicate for a meta-alarm is provided, then the meta-alarm will", "not in self.alarms: # Add default initial if not there already self.alarms[alarm_name] =", "should be in a python module (directory with an __init__.py) and in the", "alarm with the node_name of the alarm_server so that users know it is", "alarm' else: rospy.logwarn('Meta alarm callback for {} failed to return an Alarm or", "in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self,", "so the class exists exists h = handler() alarm_name = handler.alarm_name # Set", "is False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm)", "# Maps meta alarm names to predicate Handler functions self.meta_alarms = {} msg", "self.alarms[meta_alarm] # Check the predicate, this should return either an alarm object or", "necessary (could have already been added while creating metas) if hasattr(h, 'initial_alarm'): if", "server. 
''' # If the param exists, load it here handler_module = rospy.get_param(\"~handler_module\",", "else: self.alarms[alarm_name] = h.initial_alarm # Update even if already added to server elif", "def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm", "it an alarm instance send it out as is if isinstance(res, Alarm): alarm", "alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() #", "by a change of state of their child alarms. The /meta_alarms parameter defines", "the python path. They will be loaded from the module specified with the", "meta alarms to the alarm server Meta alarms are special in that they", "alarm. It has the following structure: {meta_alarm_name : [list of child alarm names],", "is raised, set problem description alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta", "for meta, alarms in meta_alarms_dict.iteritems(): # Add the meta alarm if meta not", "_create_alarm_handlers(self): ''' Alarm handlers are classes imported by the alarm server and run", "# Set initial state if necessary (could have already been added while creating", "know it is the initial state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server'", "True def _on_get_alarm(self, srv): ''' Either returns the alarm request if it exists", "of its metal alarms has changed. Then, updates the status of the parent", "will be loaded from the module specified with the ~handler_module param to the", "of a meta alarm. It has the following structure: {meta_alarm_name : [list of", "don't want to # interface with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet,", "handler_module is None: return # Give handlers access to alarm server HandlerBase._init(self) #", "fromlist=[\"\"]) for handler in [cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and", "Alarm objects self.alarms = {} # Handler classes for overwriting default alarm functionality", "be in a python module (directory with an __init__.py) and in the python", "for name, alarm in self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm] #", "Outside interface to the alarm system. Usually you don't want to # interface", "and in the python path. They will be loaded from the module specified", "if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name))", "update if it changes the raised status raised_status = res if raised_status ==", "The /meta_alarms parameter defines a the structure of a meta alarm. It has", "= meta_alarm # Ensure alarm name is correct elif type(res) == bool: #", "from the module specified with the ~handler_module param to the alarm server. 
'''", "rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates the alarm Updating", "meta alarm' else: rospy.logwarn('Meta alarm callback for {} failed to return an Alarm", "If a handler exists for a meta alarm, we need to save the", "alarm.node_name = 'alarm_server' return alarm def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate", "server HandlerBase._init(self) # Import the module where the handlers are stored alarm_handlers =", "meta-alarm is provided, then the meta-alarm will be raised if any of the", "# Have to instantiate so the class exists exists h = handler() alarm_name", "a boolean for if should be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If", "Users can also provide more complex triggering mechanisms by providing an alarm handler", "upon a change of state of their respective alarms. Handlers should be in", "# Give handlers access to alarm server HandlerBase._init(self) # Import the module where", "defines a the structure of a meta alarm. It has the following structure:", "Import the module where the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for", "def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms to the alarm server Meta alarms", "alarm in alarms: if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if", "handler.alarm_name # Set initial state if necessary (could have already been added while", "the alarm request if it exists or a blank alarm ''' rospy.logdebug(\"Got request", "latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm system. Usually you", "# If the param exists, load it here handler_module = rospy.get_param(\"~handler_module\", None) if", "Maps alarm name to Alarm objects self.alarms = {} # Handler classes for", "with these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): '''", "alarm_name not in self.alarms: # Add default initial if not there already self.alarms[alarm_name]", "that they are not directly raised or cleared but are instead triggered by", "Ensure alarm name is correct elif type(res) == bool: # If it is", "= meta.as_msg() alarm.raised = bool(raised_status) if alarm.raised: # If it is raised, set", "pass # If a handler exists for a meta alarm, we need to", "Handler classes for overwriting default alarm functionality self.handlers = {} # Maps meta", "these directly. rospy.Service(\"/alarm/set\", AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets", "classes for overwriting default alarm functionality self.handlers = {} # Maps meta alarm", "_on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either returns the alarm", "alarms to the alarm server Meta alarms are special in that they are", "# If it is raised, set problem description alarm.problem_description = 'Raised by meta", "code upon a change of state of their respective alarms. Handlers should be", "_handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for an alarm handler when", "respective alarms. 
Handlers should be in a python module (directory with an __init__.py)", "in that they are not directly raised or cleared but are instead triggered", "def _handle_meta_alarm(self, meta_alarm, sub_alarms): ''' Calls the meta_predicate callback for an alarm handler", "handlers are classes imported by the alarm server and run code upon a", "are not directly raised or cleared but are instead triggered by a change", "alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def", "not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a handler exists", "meta = self.alarms[meta_alarm] # Check the predicate, this should return either an alarm", "it out as is if isinstance(res, Alarm): alarm = res alarm.alarm_name = meta_alarm", "if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even if", "alarms: if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ ==", "rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): # Add the meta alarm if", "rospy.logwarn('Meta alarm callback for {} failed to return an Alarm or boolean'.format(meta_alarm)) return", "is a boolean, only update if it changes the raised status raised_status =", "self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in", "of state of their respective alarms. Handlers should be in a python module", "a handler exists for a meta alarm, we need to save the predicate", "request if it exists or a blank alarm ''' rospy.logdebug(\"Got request for alarm:", "there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a handler exists for", "AlarmGet, AlarmSet from ros_alarms import Alarm import inspect class AlarmServer(object): def __init__(self): #", "is the initial state ''' alarm = Alarm.blank(name) alarm.node_name = 'alarm_server' return alarm", "meta-alarm will be raised if any of the child alarms are raised '''", "default def cb(alarm, meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if", "if should be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm", "alarm name is correct elif type(res) == bool: # If it is a", "import AlarmGet, AlarmSet from ros_alarms import Alarm import inspect class AlarmServer(object): def __init__(self):", "elif type(res) == bool: # If it is a boolean, only update if", "that users know it is the initial state ''' alarm = Alarm.blank(name) alarm.node_name", "if it changes the raised status raised_status = res if raised_status == meta.raised:", "a boolean, only update if it changes the raised status raised_status = res", "state of their respective alarms. Handlers should be in a python module (directory", "updates the status of the parent alarm, if nessesary. 
''' alarms = {name:", "name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and", "alarms) # If it an alarm instance send it out as is if", "= rospy.get_param(\"~handler_module\", None) if handler_module is None: return # Give handlers access to", "= rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): # Add the meta alarm", "If it is raised, set problem description alarm.problem_description = 'Raised by meta alarm'", "a python module (directory with an __init__.py) and in the python path. They", "in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\": rospy.init_node(\"alarm_server\") a =", "object or a boolean for if should be raised res = self.meta_alarms[meta_alarm](meta, alarms)", "triggers all of the alarms callbacks ''' if alarm.alarm_name in self.handlers: res =", "with an __init__.py) and in the python path. They will be loaded from", "They will be loaded from the module specified with the ~handler_module param to", "the handlers are stored alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls for", "AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm system. Usually", "''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return", "= handler() alarm_name = handler.alarm_name # Set initial state if necessary (could have", "triggered by a change of state of their child alarms. The /meta_alarms parameter", "either an alarm object or a boolean for if should be raised res", "= __import__(handler_module, fromlist=[\"\"]) for handler in [cls for name, cls in inspect.getmembers(alarm_handlers) if", "''' Adds meta alarms to the alarm server Meta alarms are special in", "alarms = {name: alarm for name, alarm in self.alarms.items() if name in sub_alarms}", "Have to instantiate so the class exists exists h = handler() alarm_name =", "of the alarms callbacks ''' if alarm.alarm_name in self.handlers: res = self.handlers[alarm.alarm_name].on_set(alarm) if", "for if should be raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it an", "param exists, load it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None:", "exists, load it here handler_module = rospy.get_param(\"~handler_module\", None) if handler_module is None: return", "predicate for a meta-alarm is provided, then the meta-alarm will be raised if", "Then, updates the status of the parent alarm, if nessesary. ''' alarms =", "classes imported by the alarm server and run code upon a change of", "or updates the alarm Updating the alarm triggers all of the alarms callbacks", "alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even if already", "meta_alarm # Ensure alarm name is correct elif type(res) == bool: # If", "change of state of their respective alarms. Handlers should be in a python", "the structure of a meta alarm. It has the following structure: {meta_alarm_name :", "h.initial_alarm # Update even if already added to server elif alarm_name not in", "status of the parent alarm, if nessesary. 
''' alarms = {name: alarm for", "provided, then the meta-alarm will be raised if any of the child alarms", "special in that they are not directly raised or cleared but are instead", "return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either", "an alarm instance send it out as is if isinstance(res, Alarm): alarm =", "for {} failed to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self):", "__init__.py) and in the python path. They will be loaded from the module", "save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h", "[cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls,", "exists h = handler() alarm_name = handler.alarm_name # Set initial state if necessary", "with a 'meta_predicate' method. ''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in", "= Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv):", "predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler:", "self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def _create_meta_alarms(self, namespace=\"meta_alarms/\"): ''' Adds meta alarms", "= self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate for a meta-alarm is", "alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\": rospy.init_node(\"alarm_server\")", "Maps meta alarm names to predicate Handler functions self.meta_alarms = {} msg =", "self.meta_alarms = {} msg = \"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\",", "= \"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\",", "If it is a boolean, only update if it changes the raised status", "meta alarm. It has the following structure: {meta_alarm_name : [list of child alarm", "''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): '''", "by providing an alarm handler class with a 'meta_predicate' method. ''' meta_alarms_dict =", "{} msg = \"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub", "sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm not in self.alarms:", "to server elif alarm_name not in self.alarms: # Add default initial if not", "If no predicate for a meta-alarm is provided, then the meta-alarm will be", "by the alarm server and run code upon a change of state of", "updates the alarm Updating the alarm triggers all of the alarms callbacks '''", "alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name):", "if nessesary. 
''' alarms = {name: alarm for name, alarm in self.alarms.items() if", "= self.handlers[alarm.alarm_name].on_set(alarm) if res is False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm)", "type(res) == bool: # If it is a boolean, only update if it", "name is correct elif type(res) == bool: # If it is a boolean,", "def default(meta, alarms): ''' If no predicate for a meta-alarm is provided, then", "the alarm Updating the alarm triggers all of the alarms callbacks ''' if", "# Add default initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass", "in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm)", "a meta alarm, we need to save the predicate if alarm_name in self.meta_alarms:", "self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): '''", "handler when one of its metal alarms has changed. Then, updates the status", "if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm #", "the alarm system. Usually you don't want to # interface with these directly.", "self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the", "res alarm.alarm_name = meta_alarm # Ensure alarm name is correct elif type(res) ==", "''' Alarm handlers are classes imported by the alarm server and run code", "the meta alarm if meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta,", "if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm =", "sub_alarms) for alarm in alarms: if alarm not in self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm)", "status raised_status = res if raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised", "for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank", "to alarm server HandlerBase._init(self) # Import the module where the handlers are stored", "self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes imported by the alarm server", "initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass # If a", "in [cls for name, cls in inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and", "AlarmSet, self._on_set_alarm) rospy.Service(\"/alarm/get\", AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates the", "return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are", "blank alarm with the node_name of the alarm_server so that users know it", "alarm handler when one of its metal alarms has changed. Then, updates the", "\"alarm_name\") and name is not \"HandlerBase\"]: # Have to instantiate so the class", "of the parent alarm, if nessesary. 
''' alarms = {name: alarm for name,", "''' meta_alarms_dict = rospy.get_param(namespace, {}) for meta, alarms in meta_alarms_dict.iteritems(): # Add the", "are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms): return", "elif alarm_name not in self.alarms: # Add default initial if not there already", "to save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] =", "inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: #", "or cleared but are instead triggered by a change of state of their", "Add default initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name) else: pass #", "alarm_handlers = __import__(handler_module, fromlist=[\"\"]) for handler in [cls for name, cls in inspect.getmembers(alarm_handlers)", "it exists or a blank alarm ''' rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return", "if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]:", "users know it is the initial state ''' alarm = Alarm.blank(name) alarm.node_name =", "(directory with an __init__.py) and in the python path. They will be loaded", "Set initial state if necessary (could have already been added while creating metas)", "h = handler() alarm_name = handler.alarm_name # Set initial state if necessary (could", "raised_status = res if raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised =", "ros_alarms import HandlerBase from ros_alarms.msg import Alarm as AlarmMsg from ros_alarms.srv import AlarmGet,", "meta not in self.alarms: self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no", "send it out as is if isinstance(res, Alarm): alarm = res alarm.alarm_name =", "= alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self,", "= self.make_tagged_alarm(alarm_name) else: pass # If a handler exists for a meta alarm,", "the meta_predicate callback for an alarm handler when one of its metal alarms", "loaded from the module specified with the ~handler_module param to the alarm server.", "and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: # Have", "the meta-alarm will be raised if any of the child alarms are raised", "cleared but are instead triggered by a change of state of their child", "False: return False if alarm.alarm_name in self.alarms: self.alarms[alarm.alarm_name].update(alarm) else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if", "their respective alarms. Handlers should be in a python module (directory with an", "a meta alarm. 
It has the following structure: {meta_alarm_name : [list of child", "from ros_alarms import Alarm import inspect class AlarmServer(object): def __init__(self): # Maps alarm", "self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate for a meta-alarm is provided,", "rospy.logdebug(\"Got request for alarm: {}\".format(srv.alarm_name)) return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes", "meta alarm names to predicate Handler functions self.meta_alarms = {} msg = \"Expecting", "alarms are special in that they are not directly raised or cleared but", "the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded", "alarms in meta_alarms_dict.iteritems(): # Add the meta alarm if meta not in self.alarms:", "default(meta, alarms): ''' If no predicate for a meta-alarm is provided, then the", "return either an alarm object or a boolean for if should be raised", "{} failed to return an Alarm or boolean'.format(meta_alarm)) return self.set_alarm(alarm) def _create_alarm_handlers(self): '''", "bool(raised_status) if alarm.raised: # If it is raised, set problem description alarm.problem_description =", "sub_alarms} meta = self.alarms[meta_alarm] # Check the predicate, this should return either an", "alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name] = h rospy.loginfo(\"Loaded handler: {}\".format(h.alarm_name)) def", "but are instead triggered by a change of state of their child alarms.", "import inspect class AlarmServer(object): def __init__(self): # Maps alarm name to Alarm objects", "True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm) return True def _on_get_alarm(self, srv): ''' Either returns", "raised res = self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance send it", "python path. They will be loaded from the module specified with the ~handler_module", "= rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm", "it is raised, set problem description alarm.problem_description = 'Raised by meta alarm' else:", "raised, set problem description alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta alarm", "self.alarms[meta] = self.make_tagged_alarm(meta) def default(meta, alarms): ''' If no predicate for a meta-alarm", "Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def _on_set_alarm(self, srv): self.set_alarm(srv.alarm)", "self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm system. 
Usually you don't want", "self.handlers = {} # Maps meta alarm names to predicate Handler functions self.meta_alarms", "meta_name=meta, sub_alarms=alarms): return self._handle_meta_alarm(meta_name, sub_alarms) for alarm in alarms: if alarm not in", "= h.initial_alarm # Update even if already added to server elif alarm_name not", "the alarm server and run code upon a change of state of their", "It has the following structure: {meta_alarm_name : [list of child alarm names], ...}", "inspect.getmembers(alarm_handlers) if inspect.isclass(cls) and issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is not", "out as is if isinstance(res, Alarm): alarm = res alarm.alarm_name = meta_alarm #", "Alarm import inspect class AlarmServer(object): def __init__(self): # Maps alarm name to Alarm", "should return either an alarm object or a boolean for if should be", "= self.alarms[meta_alarm] # Check the predicate, this should return either an alarm object", "name to Alarm objects self.alarms = {} # Handler classes for overwriting default", "alarm in self.alarms.items() if name in sub_alarms} meta = self.alarms[meta_alarm] # Check the", "alarm triggers all of the alarms callbacks ''' if alarm.alarm_name in self.handlers: res", "param to the alarm server. ''' # If the param exists, load it", "set_alarm(self, alarm): ''' Sets or updates the alarm Updating the alarm triggers all", "as is if isinstance(res, Alarm): alarm = res alarm.alarm_name = meta_alarm # Ensure", "If it an alarm instance send it out as is if isinstance(res, Alarm):", "in self.alarms: # Add default initial if not there already self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name)", "return self.set_alarm(alarm) def _create_alarm_handlers(self): ''' Alarm handlers are classes imported by the alarm", "self.alarms: self.alarms[alarm] = self.make_tagged_alarm(alarm) self.alarms[alarm].add_callback(cb) if __name__ == \"__main__\": rospy.init_node(\"alarm_server\") a = AlarmServer()", "server and run code upon a change of state of their respective alarms.", "= res if raised_status == meta.raised: return alarm = meta.as_msg() alarm.raised = bool(raised_status)", "exists exists h = handler() alarm_name = handler.alarm_name # Set initial state if", "= {} msg = \"Expecting at most the following alarms: {}\" rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", [])))", "AlarmGet, self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates the alarm Updating the", "rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to the alarm system.", "''' Calls the meta_predicate callback for an alarm handler when one of its", "self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update even if already added to server", "a meta-alarm is provided, then the meta-alarm will be raised if any of", "None) if handler_module is None: return # Give handlers access to alarm server", "handler exists for a meta alarm, we need to save the predicate if", "alarm.problem_description = 'Raised by meta alarm' else: rospy.logwarn('Meta alarm callback for {} failed", "need to save the predicate if alarm_name in self.meta_alarms: self.meta_alarms[alarm_name] = h.meta_predicate self.handlers[alarm_name]", "while creating metas) if hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name]", "for a meta-alarm is provided, then the 
meta-alarm will be raised if any", "rospy.loginfo(msg.format(rospy.get_param(\"/known_alarms\", []))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface", "else: pass # If a handler exists for a meta alarm, we need", "default alarm functionality self.handlers = {} # Maps meta alarm names to predicate", "mechanisms by providing an alarm handler class with a 'meta_predicate' method. ''' meta_alarms_dict", "raised if any of the child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta]", "[]))) self._alarm_pub = rospy.Publisher(\"/alarm/updates\", AlarmMsg, latch=True, queue_size=100) self._create_meta_alarms() self._create_alarm_handlers() # Outside interface to", "self._on_get_alarm) def set_alarm(self, alarm): ''' Sets or updates the alarm Updating the alarm", "= self.meta_alarms[meta_alarm](meta, alarms) # If it an alarm instance send it out as", "the alarm triggers all of the alarms callbacks ''' if alarm.alarm_name in self.handlers:", "hasattr(h, 'initial_alarm'): if alarm_name in self.alarms: self.alarms[alarm_name].update(h.initial_alarm) else: self.alarms[alarm_name] = h.initial_alarm # Update", "Alarm): alarm = res alarm.alarm_name = meta_alarm # Ensure alarm name is correct", "if isinstance(res, Alarm): alarm = res alarm.alarm_name = meta_alarm # Ensure alarm name", "module (directory with an __init__.py) and in the python path. They will be", "else: self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm) if isinstance(alarm,Alarm): alarm = alarm.as_msg() self._alarm_pub.publish(alarm) return True def", "nessesary. ''' alarms = {name: alarm for name, alarm in self.alarms.items() if name", "to the alarm server. ''' # If the param exists, load it here", "issubclass(cls, HandlerBase) and hasattr(cls, \"alarm_name\") and name is not \"HandlerBase\"]: # Have to", "following structure: {meta_alarm_name : [list of child alarm names], ...} Users can also", "alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default def cb(alarm, meta_name=meta, sub_alarms=alarms):", "return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp() def make_tagged_alarm(self, name): ''' Makes a blank alarm with the", "of child alarm names], ...} Users can also provide more complex triggering mechanisms", "= bool(raised_status) if alarm.raised: # If it is raised, set problem description alarm.problem_description", "provide more complex triggering mechanisms by providing an alarm handler class with a", "any of the child alarms are raised ''' return any(alarms.items()) self.meta_alarms[meta] = default", "not \"HandlerBase\"]: # Have to instantiate so the class exists exists h =" ]
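
The handler hooks that the server looks for (alarm_name, on_set, initial_alarm, meta_predicate) are only described in the docstrings above, so a minimal sketch of a handler module may help. Everything below is illustrative: the module name, the alarm names, and the parameter values are assumptions, not part of the server code.

# handlers.py -- a hypothetical handler module, pointed to by the ~handler_module param.
# All names here (thruster-out, kill, ...) are invented for illustration.
from ros_alarms import HandlerBase


class ThrusterOut(HandlerBase):
    alarm_name = 'thruster-out'  # matched against incoming alarms by name

    def on_set(self, alarm):
        # Called before the server stores and publishes the alarm.
        # Returning False vetoes the update; anything else lets it through.
        return True


class Kill(HandlerBase):
    alarm_name = 'kill'  # also listed under the meta_alarms param below

    def meta_predicate(self, meta_alarm, sub_alarms):
        # Raise the meta alarm whenever any child alarm is raised.
        return any(a.raised for a in sub_alarms.values())


# The meta_alarms parameter could then be set like this (names are illustrative):
#   rospy.set_param("meta_alarms", {"kill": ["thruster-out", "height-over-bottom"]})

The exact HandlerBase interface in the installed ros_alarms package may differ; the sketch only mirrors the attributes that the loading loop in _create_alarm_handlers inspects.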

# Phenotype comparison with per-feature constraints.
class PhenotypeCompare:
    def __init__(self, constraints, count_unknown_as_diff=True):
        self.compare_functions = {
            "equal": PhenotypeCompare._compare_method_equal,
            "range": PhenotypeCompare._compare_method_range,
            "abs_distance": PhenotypeCompare._compare_method_abs_distance,
            "percent_distance": PhenotypeCompare._compare_method_percent_distance,
        }
        self.constraints = constraints
        errors = self._sanity_check_constraints()
        if len(errors):
            raise RuntimeError("Errors in constraints:\n" + "\n".join(errors))
        self.count_unknown_as_diff = count_unknown_as_diff
        # Keys whose constraint has 'must_be_same': False are required to differ
        self.required_diff_keys = {
            k for k in self.constraints if not self.constraints[k]["must_be_same"]
        }

    def _sanity_check_constraints(self):
        errors = []
        for d in self.constraints.values():
            if d["method"] not in self.compare_functions:
                errors.append(f"Unknown method {d}")
                continue
            if d["method"] == "equal":
                if "params" not in d:
                    d["params"] = {}
                elif len(d["params"]) > 0:
                    errors.append(f"method is 'equal', params supplied: {d}")
            if d["method"] == "range" and (
                "low" not in d["params"] or "high" not in d["params"]
            ):
                errors.append(f"method is 'range', low and high not supplied: {d}")
            if d["method"] == "abs_distance" and "max_dist" not in d["params"]:
                errors.append(f"method is 'abs_distance', max_dist not supplied: {d}")
            if d["method"] == "percent_distance" and "max_percent" not in d["params"]:
                errors.append(
                    f"method is 'percent_distance', max_percent not supplied: {d}"
                )
        return errors

    def satisfy_required_differences(self, pheno1, pheno2):
        for key in self.required_diff_keys:
            if PhenotypeCompare._phenos_equal_account_for_none(
                pheno1[key],
                pheno2[key],
                self.compare_functions[self.constraints[key]["method"]],
                False,
                **self.constraints[key]["params"],
            ):
                return False
        return True

    @staticmethod
    def _compare_method_equal(p1, p2):
        return p1 == p2

    @staticmethod
    def _compare_method_range(p1, p2, low=None, high=None):
        return (low <= p1 <= high) == (low <= p2 <= high)

    @staticmethod
    def _compare_method_abs_distance(p1, p2, max_dist=None):
        return abs(p1 - p2) <= max_dist

    @staticmethod
    def _compare_method_percent_distance(p1, p2, max_percent=None):
        if p1 == p2 == 0:
            return True
        return 100 * abs(p1 - p2) / max(abs(p1), abs(p2)) <= max_percent

    @classmethod
    def _phenos_equal_account_for_none(
        cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs
    ):
        if p1 is None or p2 is None:
            return not count_unknown_as_diff
        else:
            return compare_function(p1, p2, **kwargs)

    def phenos_agree_on_one_feature(self, pheno1, pheno2, key):
        return PhenotypeCompare._phenos_equal_account_for_none(
            pheno1[key],
            pheno2[key],
            self.compare_functions[self.constraints[key]["method"]],
            self.count_unknown_as_diff,
            **self.constraints[key]["params"],
        )

    def phenos_agree_on_features(self, pheno1, pheno2, keys):
        for key in keys:
            if not self.phenos_agree_on_one_feature(pheno1, pheno2, key):
                return False
        return True

    def differences(self, pheno1, pheno2):
        """Returns the number of differences between the two phenotypes.

        Assumes that satisfy_required_differences(pheno1, pheno2) is True
        (or at least doesn't care whether it is True or False). Counts the
        differences only from the constraints where 'must_be_same' is True."""
        differences = 0
        for key, constraint in self.constraints.items():
            if constraint["must_be_same"] is False:
                continue
            elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key):
                differences += 1
        return differences
errors.append(f\"method is 'equal', params", "is 'percent_distance', max_percent not supplied: {d}\" ) return errors def satisfy_required_differences(self, pheno1, pheno2):", "self.constraints if not self.constraints[k][\"must_be_same\"] } def _sanity_check_constraints(self): errors = [] for d in", "def differences(self, pheno1, pheno2): \"\"\"Returns number of differences between the two phenotypes. Assumes", "self.constraints.values(): if d[\"method\"] not in self.compare_functions: errors.append(f\"Unknown method {d}\") continue if d[\"method\"] ==", "if len(errors): raise RuntimeError(\"Errors in constraints:\\n\" + \"\\n\".join(errors)) self.count_unknown_as_diff = count_unknown_as_diff self.required_diff_keys =", "raise RuntimeError(\"Errors in constraints:\\n\" + \"\\n\".join(errors)) self.count_unknown_as_diff = count_unknown_as_diff self.required_diff_keys = { k", "{d}\") if d[\"method\"] == \"range\" and ( \"low\" not in d[\"params\"] or \"high\"", "): if p1 is None or p2 is None: return not count_unknown_as_diff else:", "False return True @staticmethod def _compare_method_equal(p1, p2): return p1 == p2 @staticmethod def", "{d}\") if d[\"method\"] == \"percent_distance\" and (\"max_percent\" not in d[\"params\"]): errors.append( f\"method is", "None or p2 is None: return not count_unknown_as_diff else: return compare_function(p1, p2, **kwargs)", "in self.compare_functions: errors.append(f\"Unknown method {d}\") continue if d[\"method\"] == \"equal\": if \"params\" not", "self.constraints[k][\"must_be_same\"] } def _sanity_check_constraints(self): errors = [] for d in self.constraints.values(): if d[\"method\"]", "errors = [] for d in self.constraints.values(): if d[\"method\"] not in self.compare_functions: errors.append(f\"Unknown", "== \"range\" and ( \"low\" not in d[\"params\"] or \"high\" not in d[\"params\"]", "not in d[\"params\"] or \"high\" not in d[\"params\"] ): errors.append(f\"method is 'range', low", "of differences between the two phenotypes. Assumes that satisfy_required_differences(pheno1, pheno2) is True. (Or", "if not self.phenos_agree_on_one_feature(pheno1, pheno2, key): return False return True def differences(self, pheno1, pheno2):", "the two phenotypes. Assumes that satisfy_required_differences(pheno1, pheno2) is True. 
(Or at least doesn't", "* abs(p1 - p2) / max(abs(p1), abs(p2)) <= max_percent @classmethod def _phenos_equal_account_for_none( cls,", "he constraints where 'must_be_same' is True\"\"\" differences = 0 for key, constraint in", "for key in self.required_diff_keys: if PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], False, **self.constraints[key][\"params\"], ): return", "= {} elif len(d[\"params\"]) > 0: errors.append(f\"method is 'equal', params supplied: {d}\") if", "<= max_percent @classmethod def _phenos_equal_account_for_none( cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs ): if", "(low <= p1 <= high) == (low <= p2 <= high) @staticmethod def", "and (\"max_percent\" not in d[\"params\"]): errors.append( f\"method is 'percent_distance', max_percent not supplied: {d}\"", "return False return True @staticmethod def _compare_method_equal(p1, p2): return p1 == p2 @staticmethod", "max_dist=None): return abs(p1 - p2) <= max_dist @staticmethod def _compare_method_percent_distance(p1, p2, max_percent=None): if", "\"equal\": PhenotypeCompare._compare_method_equal, \"range\": PhenotypeCompare._compare_method_range, \"abs_distance\": PhenotypeCompare._compare_method_abs_distance, \"percent_distance\": PhenotypeCompare._compare_method_percent_distance, } self.constraints = constraints errors", "= count_unknown_as_diff self.required_diff_keys = { k for k in self.constraints if not self.constraints[k][\"must_be_same\"]", "supplied: {d}\" ) return errors def satisfy_required_differences(self, pheno1, pheno2): for key in self.required_diff_keys:", "_phenos_equal_account_for_none( cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs ): if p1 is None or", "key, constraint in self.constraints.items(): if constraint[\"must_be_same\"] is False: continue elif not self.phenos_agree_on_one_feature(pheno1, pheno2,", "{d}\" ) return errors def satisfy_required_differences(self, pheno1, pheno2): for key in self.required_diff_keys: if", "differences only from he constraints where 'must_be_same' is True\"\"\" differences = 0 for", "self.required_diff_keys: if PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], False, **self.constraints[key][\"params\"], ): return False return True", "@classmethod def _phenos_equal_account_for_none( cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs ): if p1 is", "@staticmethod def _compare_method_abs_distance(p1, p2, max_dist=None): return abs(p1 - p2) <= max_dist @staticmethod def", "def satisfy_required_differences(self, pheno1, pheno2): for key in self.required_diff_keys: if PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]],", "for key, constraint in self.constraints.items(): if constraint[\"must_be_same\"] is False: continue elif not self.phenos_agree_on_one_feature(pheno1,", "pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], False, **self.constraints[key][\"params\"], ): return False return True @staticmethod def _compare_method_equal(p1, p2):", "p2, low=None, high=None): return (low <= p1 <= high) == (low <= p2", "or \"high\" not in d[\"params\"] ): errors.append(f\"method is 'range', low and high not", "self._sanity_check_constraints() if len(errors): raise RuntimeError(\"Errors in constraints:\\n\" + \"\\n\".join(errors)) self.count_unknown_as_diff = count_unknown_as_diff 
self.required_diff_keys", "p1 == p2 @staticmethod def _compare_method_range(p1, p2, low=None, high=None): return (low <= p1", "'abs_distance', max_dist not supplied: {d}\") if d[\"method\"] == \"percent_distance\" and (\"max_percent\" not in", "pheno2, key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self, pheno1,", ") def phenos_agree_on_features(self, pheno1, pheno2, keys): for key in keys: if not self.phenos_agree_on_one_feature(pheno1,", "self.constraints = constraints errors = self._sanity_check_constraints() if len(errors): raise RuntimeError(\"Errors in constraints:\\n\" +", "is False: continue elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key): differences += 1 return differences", "{d}\") continue if d[\"method\"] == \"equal\": if \"params\" not in d: d[\"params\"] =", "pheno2): for key in self.required_diff_keys: if PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], False, **self.constraints[key][\"params\"], ):", "high not supplied: {d}\") if d[\"method\"] == \"abs_distance\" and (\"max_dist\" not in d[\"params\"]):", "d in self.constraints.values(): if d[\"method\"] not in self.compare_functions: errors.append(f\"Unknown method {d}\") continue if", "\"range\": PhenotypeCompare._compare_method_range, \"abs_distance\": PhenotypeCompare._compare_method_abs_distance, \"percent_distance\": PhenotypeCompare._compare_method_percent_distance, } self.constraints = constraints errors = self._sanity_check_constraints()", "if constraint[\"must_be_same\"] is False: continue elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key): differences += 1", "max_dist not supplied: {d}\") if d[\"method\"] == \"percent_distance\" and (\"max_percent\" not in d[\"params\"]):", "abs(p2)) <= max_percent @classmethod def _phenos_equal_account_for_none( cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs ):", "and ( \"low\" not in d[\"params\"] or \"high\" not in d[\"params\"] ): errors.append(f\"method", "<= p1 <= high) == (low <= p2 <= high) @staticmethod def _compare_method_abs_distance(p1,", "phenos_agree_on_one_feature(self, pheno1, pheno2, key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def", "= { \"equal\": PhenotypeCompare._compare_method_equal, \"range\": PhenotypeCompare._compare_method_range, \"abs_distance\": PhenotypeCompare._compare_method_abs_distance, \"percent_distance\": PhenotypeCompare._compare_method_percent_distance, } self.constraints =", "\"abs_distance\": PhenotypeCompare._compare_method_abs_distance, \"percent_distance\": PhenotypeCompare._compare_method_percent_distance, } self.constraints = constraints errors = self._sanity_check_constraints() if len(errors):", "else: return compare_function(p1, p2, **kwargs) def phenos_agree_on_one_feature(self, pheno1, pheno2, key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key],", "errors.append( f\"method is 'percent_distance', max_percent not supplied: {d}\" ) return errors def satisfy_required_differences(self,", "PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], 
self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self, pheno1, pheno2, keys): for", "if p1 is None or p2 is None: return not count_unknown_as_diff else: return", "\"percent_distance\": PhenotypeCompare._compare_method_percent_distance, } self.constraints = constraints errors = self._sanity_check_constraints() if len(errors): raise RuntimeError(\"Errors", "p2 == 0: return True return 100 * abs(p1 - p2) / max(abs(p1),", "\"params\" not in d: d[\"params\"] = {} elif len(d[\"params\"]) > 0: errors.append(f\"method is", "def _sanity_check_constraints(self): errors = [] for d in self.constraints.values(): if d[\"method\"] not in", "== p2 @staticmethod def _compare_method_range(p1, p2, low=None, high=None): return (low <= p1 <=", "abs(p1 - p2) <= max_dist @staticmethod def _compare_method_percent_distance(p1, p2, max_percent=None): if p1 ==", "return abs(p1 - p2) <= max_dist @staticmethod def _compare_method_percent_distance(p1, p2, max_percent=None): if p1", "{d}\") if d[\"method\"] == \"abs_distance\" and (\"max_dist\" not in d[\"params\"]): errors.append(f\"method is 'abs_distance',", "return p1 == p2 @staticmethod def _compare_method_range(p1, p2, low=None, high=None): return (low <=", "is True\"\"\" differences = 0 for key, constraint in self.constraints.items(): if constraint[\"must_be_same\"] is", "( \"low\" not in d[\"params\"] or \"high\" not in d[\"params\"] ): errors.append(f\"method is", "differences = 0 for key, constraint in self.constraints.items(): if constraint[\"must_be_same\"] is False: continue", "pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self, pheno1, pheno2, keys): for key", "supplied: {d}\") if d[\"method\"] == \"range\" and ( \"low\" not in d[\"params\"] or", "p2, **kwargs) def phenos_agree_on_one_feature(self, pheno1, pheno2, key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff,", "False return True def differences(self, pheno1, pheno2): \"\"\"Returns number of differences between the", "d[\"params\"] = {} elif len(d[\"params\"]) > 0: errors.append(f\"method is 'equal', params supplied: {d}\")", "errors = self._sanity_check_constraints() if len(errors): raise RuntimeError(\"Errors in constraints:\\n\" + \"\\n\".join(errors)) self.count_unknown_as_diff =", "**kwargs ): if p1 is None or p2 is None: return not count_unknown_as_diff", "self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self, pheno1, pheno2, keys): for key in keys:", "in d[\"params\"] or \"high\" not in d[\"params\"] ): errors.append(f\"method is 'range', low and", "self.constraints.items(): if constraint[\"must_be_same\"] is False: continue elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key): differences +=", "} self.constraints = constraints errors = self._sanity_check_constraints() if len(errors): raise RuntimeError(\"Errors in constraints:\\n\"", "): return False return True @staticmethod def _compare_method_equal(p1, p2): return p1 == p2", "\"\"\"Returns number of differences between the two phenotypes. 
Assumes that satisfy_required_differences(pheno1, pheno2) is", "key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self, pheno1, pheno2,", "pheno1, pheno2, key): return PhenotypeCompare._phenos_equal_account_for_none( pheno1[key], pheno2[key], self.compare_functions[self.constraints[key][\"method\"]], self.count_unknown_as_diff, **self.constraints[key][\"params\"], ) def phenos_agree_on_features(self,", "class PhenotypeCompare: def __init__(self, constraints, count_unknown_as_diff=True): self.compare_functions = { \"equal\": PhenotypeCompare._compare_method_equal, \"range\": PhenotypeCompare._compare_method_range,", "p2) / max(abs(p1), abs(p2)) <= max_percent @classmethod def _phenos_equal_account_for_none( cls, p1, p2, compare_function,", "constraint in self.constraints.items(): if constraint[\"must_be_same\"] is False: continue elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key):", "def _phenos_equal_account_for_none( cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs ): if p1 is None", "constraints, count_unknown_as_diff=True): self.compare_functions = { \"equal\": PhenotypeCompare._compare_method_equal, \"range\": PhenotypeCompare._compare_method_range, \"abs_distance\": PhenotypeCompare._compare_method_abs_distance, \"percent_distance\": PhenotypeCompare._compare_method_percent_distance,", "p2 is None: return not count_unknown_as_diff else: return compare_function(p1, p2, **kwargs) def phenos_agree_on_one_feature(self," ]
[]
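The first list above is built from overlapping fragments of a `PhenotypeCompare` class. As a standalone sketch reconstructed only from what is visible in those fragments (module-level functions instead of the class's staticmethods, with invented example values), the four comparison methods and the None-handling wrapper behave like this:

```python
# Minimal sketch of the comparison logic visible in the PhenotypeCompare fragments.
# Function names are simplified and the example values are invented for illustration.

def compare_equal(p1, p2):
    return p1 == p2

def compare_range(p1, p2, low=None, high=None):
    # "Same" means both values fall on the same side of the [low, high] window.
    return (low <= p1 <= high) == (low <= p2 <= high)

def compare_abs_distance(p1, p2, max_dist=None):
    return abs(p1 - p2) <= max_dist

def compare_percent_distance(p1, p2, max_percent=None):
    if p1 == p2 == 0:
        return True
    return 100 * abs(p1 - p2) / max(abs(p1), abs(p2)) <= max_percent

def phenos_equal(p1, p2, compare, count_unknown_as_diff, **kwargs):
    # None means "unknown"; whether that counts as a difference is configurable.
    if p1 is None or p2 is None:
        return not count_unknown_as_diff
    return compare(p1, p2, **kwargs)

if __name__ == "__main__":
    print(phenos_equal(50, 53, compare_abs_distance, True, max_dist=5))            # True
    print(phenos_equal(50, None, compare_abs_distance, True, max_dist=5))           # False: unknown counts as a diff
    print(phenos_equal(100, 104, compare_percent_distance, True, max_percent=5))    # True (4% apart)
```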
[ "= output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist())", "pandas as pd import os import spacy from spacy.symbols import VERB, NOUN import", "image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question = row['question'] doc = nlp(unicode(question)) question_action", "output_dir + 'question_action_data-v2.csv' editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file =", "read_json from common import save_data print \"Loading feature extractors...\" nlp = spacy.load('en') dataset_dir", "data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] = 0 data['image_id'] = row['image_id'] data['qa_id']", "= question noedit_data['question'] = question noedit_data['answer'] = 'no edit because ' + question_action_conjugated", "output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df all_action_names =", "noedit_data['original_question'] = question noedit_data['question'] = question noedit_data['answer'] = 'no edit because ' +", "import read_json from common import save_data print \"Loading feature extractors...\" nlp = spacy.load('en')", "import pandas as pd import os import spacy from spacy.symbols import VERB, NOUN", "= 0 total = len(df) for _,row in df.iterrows(): if i % 1000", "\"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if w == question_action else w", "editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question'] = question noedit_data['question'] = question", "output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist()) exclude", "question = row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names -", "noedit_data['answer'] = 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] =", "= replacement_action_conjugated data['relevant'] = 0 data['image_id'] = row['image_id'] data['qa_id'] = -1 * row['qa_id']", "= random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = '", "= pd.read_csv(dataset_output_file) editable_questions = [] i = 0 total = len(df) for _,row", "' + question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] = 0 data['image_id']", "0 data['image_id'] = row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data)", "spacy from spacy.symbols import VERB, NOUN import random from pattern.en import conjugate, PROGRESSIVE,", "noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions'] editable_questions.append(noedit_data) editable_df = save_data(editable_questions,", "== image_file]['action'].unique().tolist() image_actions.sort() 
question = row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image", "= set(exclude) all_action_names = all_action_names - exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file)", "tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data = {} data['image_file'] = image_file data['original_question'] =", "= ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question'] = question noedit_data['question']", "= question_action noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] =", "exclude_actions = set(exclude) all_action_names = all_action_names - exclude_actions # print all_action_names df =", "image_file]['action'].unique().tolist() image_actions.sort() question = row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image =", "spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir + 'question_action_data-v2.csv' editable_dataset_output_file", "pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude +=", "INDICATIVE from utils import read_json from common import save_data print \"Loading feature extractors...\"", "save_data print \"Loading feature extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir =", "pd.read_csv(dataset_output_file) editable_questions = [] i = 0 total = len(df) for _,row in", "= [] i = 0 total = len(df) for _,row in df.iterrows(): if", "import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils import read_json from", "editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv'", "aspect=PROGRESSIVE) data = {} data['image_file'] = image_file data['original_question'] = question data['question'] = editable_question", "random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils import read_json from common", "tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if w == question_action", "nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir +", "= '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df", "{} noedit_data['image_file'] = image_file noedit_data['original_question'] = question noedit_data['question'] = question noedit_data['answer'] = 'no", "import os import spacy from spacy.symbols import VERB, NOUN import random from pattern.en", "== 0: print \"Question: [%d/%d]\" % (i,total) i += 1 # print row", "= \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data = {} data['image_file'] = image_file 
data['original_question'] = question", "= {} data['image_file'] = image_file data['original_question'] = question data['question'] = editable_question data['answer'] =", "= row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions))", "noedit_data['replacement_action'] = question_action noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions']", "print row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question =", "row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question = row['question']", "= list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense = \"present\",", "set(exclude) all_action_names = all_action_names - exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions", "print \"Loading feature extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/')", "data['original_question'] = question data['question'] = editable_question data['answer'] = 'edit to ' + question_action_conjugated", "data['answer'] = 'edit to ' + question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated", "question_action noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions']", "question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated =", "as pd import os import spacy from spacy.symbols import VERB, NOUN import random", "in df.iterrows(): if i % 1000 == 0: print \"Question: [%d/%d]\" % (i,total)", "question noedit_data['question'] = question noedit_data['answer'] = 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense']", "{} data['image_file'] = image_file data['original_question'] = question data['question'] = editable_question data['answer'] = 'edit", "= ' '.join([replacement_action_conjugated if w == question_action else w for w in question.split()])", "i = 0 total = len(df) for _,row in df.iterrows(): if i %", "= question data['question'] = editable_question data['answer'] = 'edit to ' + question_action_conjugated data['original_answer_tense']", "-1 * row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file", "'/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df all_action_names", "# print row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question", "from spacy.symbols import VERB, NOUN import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE", "+= ['wear', 
'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names = all_action_names - exclude_actions # print", "os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir + 'question_action_data-v2.csv' editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir =", "= len(df) for _,row in df.iterrows(): if i % 1000 == 0: print", "i += 1 # print row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file'] ==", "conjugate, PROGRESSIVE, INDICATIVE from utils import read_json from common import save_data print \"Loading", "image_file noedit_data['original_question'] = question noedit_data['question'] = question noedit_data['answer'] = 'no edit because '", "row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {}", "# print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = [] i = 0 total", "question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] = 0 data['image_id'] = row['image_id'] data['qa_id'] = -1", "because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action noedit_data['relevant'] = 1", "= question_action noedit_data['replacement_action'] = question_action noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] =", "row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question'] =", "<reponame>andeeptoor/qar-qae import pandas as pd import os import spacy from spacy.symbols import VERB,", "= 0 data['image_id'] = row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions'] = ','.join(image_actions)", "actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense =", "import spacy from spacy.symbols import VERB, NOUN import random from pattern.en import conjugate,", "feature extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file =", "= image_file data['original_question'] = question data['question'] = editable_question data['answer'] = 'edit to '", "noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id']", "_,row in df.iterrows(): if i % 1000 == 0: print \"Question: [%d/%d]\" %", "= \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if w == question_action else", "noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question'] = question noedit_data['question'] = question noedit_data['answer']", "w in question.split()]) question_action_conjugated = conjugate(question_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data =", "question noedit_data['answer'] = 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action 
noedit_data['replacement_action']", "0: print \"Question: [%d/%d]\" % (i,total) i += 1 # print row image_file", "data['relevant'] = 0 data['image_id'] = row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions'] =", "set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude +=", "= output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df", "= row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions'] editable_questions.append(noedit_data) editable_df = save_data(editable_questions, editable_dataset_output_file)", "all_action_names - exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = [] i", "question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] = 0 data['image_id'] = row['image_id']", "= pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude", "= spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir + 'question_action_data-v2.csv'", "['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names = all_action_names - exclude_actions # print all_action_names", "row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense", "replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question =", "' '.join([replacement_action_conjugated if w == question_action else w for w in question.split()]) question_action_conjugated", "= conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if w", "= 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action", "data['image_file'] = image_file data['original_question'] = question data['question'] = editable_question data['answer'] = 'edit to", "' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action noedit_data['relevant'] = 1 noedit_data['image_id']", "w == question_action else w for w in question.split()]) question_action_conjugated = conjugate(question_action, tense", "- exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = [] i =", "df.iterrows(): if i % 1000 == 0: print \"Question: [%d/%d]\" % (i,total) i", "mood=INDICATIVE, aspect=PROGRESSIVE) 
data = {} data['image_file'] = image_file data['original_question'] = question data['question'] =", "= row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data =", "- set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE)", "data = {} data['image_file'] = image_file data['original_question'] = question data['question'] = editable_question data['answer']", "extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir", "w for w in question.split()]) question_action_conjugated = conjugate(question_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE)", "+ 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file)", "exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names = all_action_names - exclude_actions #", "data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question'] = question", "actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question = row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action']", "= question noedit_data['answer'] = 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action", "actions_df = pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer']", "data['qa_id'] = -1 * row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file']", "NOUN import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils import read_json", "= all_action_names - exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = []", "noedit_data['relevant'] = 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions'] editable_questions.append(noedit_data)", "all_action_names = all_action_names - exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions =", "question_action else w for w in question.split()]) question_action_conjugated = conjugate(question_action, tense = \"present\",", "+= ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names = all_action_names -", "% 1000 == 0: print \"Question: [%d/%d]\" % (i,total) i += 1 #", "import VERB, NOUN import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils", "df = pd.read_csv(dataset_output_file) editable_questions = [] i = 0 total = len(df) for", "= 
row['image_file'] image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question = row['question'] doc =", "os import spacy from spacy.symbols import VERB, NOUN import random from pattern.en import", "mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if w == question_action else w for", "if i % 1000 == 0: print \"Question: [%d/%d]\" % (i,total) i +=", "['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names =", "exclude_actions # print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = [] i = 0", "df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude +=", "'/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file = output_dir + 'question_action_data-v2.csv' editable_dataset_output_file = output_dir +", "random.choice(actions_not_in_image) replacement_action_conjugated = conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated", "0 total = len(df) for _,row in df.iterrows(): if i % 1000 ==", "noedit_data['question'] = question noedit_data['answer'] = 'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] =", "row['question'] doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action", "utils import read_json from common import save_data print \"Loading feature extractors...\" nlp =", "= nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image)", "replacement_action_conjugated = conjugate(replacement_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) editable_question = ' '.join([replacement_action_conjugated if", "= 1 noedit_data['image_id'] = row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions'] editable_questions.append(noedit_data) editable_df", "edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action noedit_data['relevant'] =", "(i,total) i += 1 # print row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file']", "'action_image_data-v2.csv' actions_df = pd.read_csv(output_actions_file) # print df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear',", "'no edit because ' + question_action_conjugated noedit_data['original_answer_tense'] = question_action noedit_data['replacement_action'] = question_action noedit_data['relevant']", "\"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data = {} data['image_file'] = image_file data['original_question'] = question data['question']", "exclude = 
['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear',", "dataset_output_file = output_dir + 'question_action_data-v2.csv' editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/'", "'question_action_data-v2.csv' editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir +", "len(df) for _,row in df.iterrows(): if i % 1000 == 0: print \"Question:", "conjugate(question_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data = {} data['image_file'] = image_file data['original_question']", "output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir + 'action_image_data-v2.csv' actions_df =", "['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions", "nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action = random.choice(actions_not_in_image) replacement_action_conjugated", "spacy.symbols import VERB, NOUN import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from", "== question_action else w for w in question.split()]) question_action_conjugated = conjugate(question_action, tense =", "1000 == 0: print \"Question: [%d/%d]\" % (i,total) i += 1 # print", "doc = nlp(unicode(question)) question_action = row['original_question_action'] actions_not_in_image = list(all_action_names - set(image_actions)) replacement_action =", "import save_data print \"Loading feature extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir", "editable_questions = [] i = 0 total = len(df) for _,row in df.iterrows():", "all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"]", "if w == question_action else w for w in question.split()]) question_action_conjugated = conjugate(question_action,", "exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude) all_action_names = all_action_names", "# print df all_action_names = set(actions_df['action'].tolist()) exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer'] exclude += 
['be','remove','get','frisbee','object','clear','separate','feed','tennis','building']", "print all_action_names df = pd.read_csv(dataset_output_file) editable_questions = [] i = 0 total =", "replacement_action_conjugated data['relevant'] = 0 data['image_id'] = row['image_id'] data['qa_id'] = -1 * row['qa_id'] data['image_actions']", "= actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort() question = row['question'] doc = nlp(unicode(question)) question_action =", "+ 'question_action_data-v2.csv' editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv' output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/' output_actions_file = output_dir", "exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building'] exclude_actions = set(exclude)", "VERB, NOUN import random from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils import", "'.join([replacement_action_conjugated if w == question_action else w for w in question.split()]) question_action_conjugated =", "row['image_id'] noedit_data['qa_id'] = row['qa_id'] noedit_data['image_actions'] = data['image_actions'] editable_questions.append(noedit_data) editable_df = save_data(editable_questions, editable_dataset_output_file) #", "= editable_question data['answer'] = 'edit to ' + question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action']", "editable_question = ' '.join([replacement_action_conjugated if w == question_action else w for w in", "PROGRESSIVE, INDICATIVE from utils import read_json from common import save_data print \"Loading feature", "from utils import read_json from common import save_data print \"Loading feature extractors...\" nlp", "+ question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] = 0 data['image_id'] =", "from pattern.en import conjugate, PROGRESSIVE, INDICATIVE from utils import read_json from common import", "question.split()]) question_action_conjugated = conjugate(question_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data = {} data['image_file']", "\"Loading feature extractors...\" nlp = spacy.load('en') dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/' output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/') dataset_output_file", "for w in question.split()]) question_action_conjugated = conjugate(question_action, tense = \"present\", mood=INDICATIVE, aspect=PROGRESSIVE) data", "* row['qa_id'] data['image_actions'] = ','.join(image_actions) editable_questions.append(data) noedit_data = {} noedit_data['image_file'] = image_file noedit_data['original_question']", "'edit to ' + question_action_conjugated data['original_answer_tense'] = question_action data['replacement_action'] = replacement_action_conjugated data['relevant'] =", "1 # print row image_file = row['image_file'] image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist() image_actions.sort()", "+= ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building'] exclude += ['picture','position','remote','paint',\"photograph\",\"smile\"] exclude += 
import pandas as pd
import os
import spacy
from spacy.symbols import VERB, NOUN
import random
from pattern.en import conjugate, PROGRESSIVE, INDICATIVE
from utils import read_json
from common import save_data

print "Loading feature extractors..."
nlp = spacy.load('en')

dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/'
output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/')
dataset_output_file = output_dir + 'question_action_data-v2.csv'
editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv'
output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/'
output_actions_file = output_dir + 'action_image_data-v2.csv'

actions_df = pd.read_csv(output_actions_file)
# print df
all_action_names = set(actions_df['action'].tolist())

# Actions removed from the pool of candidate replacement verbs
exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer']
exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building']
exclude += ['picture','position','remote','paint',"photograph","smile"]
exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building']
exclude_actions = set(exclude)
all_action_names = all_action_names - exclude_actions
# print all_action_names

df = pd.read_csv(dataset_output_file)
editable_questions = []
i = 0
total = len(df)

# For every question emit two rows: an "edit" example whose verb is swapped with
# an action not present in the image, and a "no edit" example that keeps the
# original verb.
for _, row in df.iterrows():
    if i % 1000 == 0:
        print "Question: [%d/%d]" % (i, total)
    i += 1
    # print row
    image_file = row['image_file']
    image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist()
    image_actions.sort()
    question = row['question']
    doc = nlp(unicode(question))
    question_action = row['original_question_action']
    actions_not_in_image = list(all_action_names - set(image_actions))
    replacement_action = random.choice(actions_not_in_image)
    replacement_action_conjugated = conjugate(replacement_action, tense = "present", mood=INDICATIVE, aspect=PROGRESSIVE)
    editable_question = ' '.join([replacement_action_conjugated if w == question_action else w for w in question.split()])
    question_action_conjugated = conjugate(question_action, tense = "present", mood=INDICATIVE, aspect=PROGRESSIVE)

    data = {}
    data['image_file'] = image_file
    data['original_question'] = question
    data['question'] = editable_question
    data['answer'] = 'edit to ' + question_action_conjugated
    data['original_answer_tense'] = question_action
    data['replacement_action'] = replacement_action_conjugated
    data['relevant'] = 0
    data['image_id'] = row['image_id']
    data['qa_id'] = -1 * row['qa_id']
    data['image_actions'] = ','.join(image_actions)
    editable_questions.append(data)

    noedit_data = {}
    noedit_data['image_file'] = image_file
    noedit_data['original_question'] = question
    noedit_data['question'] = question
    noedit_data['answer'] = 'no edit because ' + question_action_conjugated
    noedit_data['original_answer_tense'] = question_action
    noedit_data['replacement_action'] = question_action
    noedit_data['relevant'] = 1
    noedit_data['image_id'] = row['image_id']
    noedit_data['qa_id'] = row['qa_id']
    noedit_data['image_actions'] = data['image_actions']
    editable_questions.append(noedit_data)

editable_df = save_data(editable_questions, editable_dataset_output_file)
# print editable_df
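The dataset construction above leans entirely on pattern.en's conjugate() to turn a verb lemma into its present-progressive form, both for the "edit to ..." answer and for the verb substituted into the question. Below is a minimal sketch of that single step in isolation, assuming pattern.en is installed; the question string and the verbs 'run' and 'jump' are illustrative placeholders, not values from the Visual Genome data. The Python 2 print statements match the script above.

from pattern.en import conjugate, PROGRESSIVE, INDICATIVE

question = 'Why does the man run across the field ?'  # hypothetical question text
question_action = 'run'        # verb lemma attached to this question
replacement_action = 'jump'    # verb sampled from actions absent in the image

# Both verbs are normalised to present progressive ('running', 'jumping')
orig_prog = conjugate(question_action, tense='present', mood=INDICATIVE, aspect=PROGRESSIVE)
repl_prog = conjugate(replacement_action, tense='present', mood=INDICATIVE, aspect=PROGRESSIVE)

# Token-level swap of the lemma, exactly as the loop above does it
edited = ' '.join([repl_prog if w == question_action else w for w in question.split()])

print edited                    # question with 'jumping' substituted for 'run'
print 'edit to ' + orig_prog    # the target answer string, e.g. 'edit to running'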
[ "pandas as pd class Model: def __init__(self): # Data: mnist dataset with open('data/mnist.pkl',", "y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with", "dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function &", "= tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME')", "= tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape", "as pd class Model: def __init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb')", "= tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 =", "= tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512],", "tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket] # Gradients", "self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None,", "pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train)", "= tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME')", "test_set = pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test = test_set self.y_train", "numpy as np import pandas as pd class Model: def __init__(self): # Data:", "dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 =", "self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 =", "+ self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\",", "tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC", "10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) #", "open('data/mnist.pkl', 'rb') as f: train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train =", "labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size", "tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\",", "layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4", "shape: [5,5,1,32] self.w_conv1 = 
tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\",", "tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session", "self.x_train, y_train = train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test =", "= tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob)", "self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer'''", "self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost", "self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer'''", "strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2,", "self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket", "= pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test = test_set self.y_train =", "tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess = tf.compat.v1.Session() # Initialize variables", "[None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv", "self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost =", "shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\",", "'''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape:", "self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv", "= tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])", "tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2", "self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat,", "= tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3)", "CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x,", "= tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + 
self.b_fc2 '''Dropout''' self.h_fc2_drop =", "tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv", "[-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop", "= tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits)", "padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1],", "[None, 10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32)", "'''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32)", "self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2", "= tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME')", "shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3,", "padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1],", "[5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256],", "dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4", "with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train", "tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2)", "tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables()", "self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size =", "as tf import pickle import numpy as np import pandas as pd class", "self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024],", "with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_", "tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1,", "layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], 
strides=[1,2,2,1], padding='SAME') '''FC", "padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1],", "'''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32)", "shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) +", "conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 =", "train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN", "tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32)", "pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image", "# activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1],", "layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1", "# For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step", "self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1],", "self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model", "= tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer'''", "dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3", "import tensorflow as tf import pickle import numpy as np import pandas as", "= [var.shape for var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) #", "= tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\",", "= tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 =", "<gh_stars>1-10 import tensorflow as tf import pickle import numpy as np import pandas", "self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None,", "self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket]", "layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3", "as np import pandas as pd class Model: def __init__(self): # Data: mnist", "# activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = 
tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1],", "= tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1", "layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth", "shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation", "# shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 =", "self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 +", "pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32,", "self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1],", "encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test", "+ self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape:", "self.var_shape = [var.shape for var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket)", "shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image,", "# conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2", "# shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 =", "For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step =", "pd class Model: def __init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb') as", "= tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create", "pickle import numpy as np import pandas as pd class Model: def __init__(self):", "tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv", "self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1],", "# shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 =", "Conv layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256]", "self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,", "= tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 =", "'rb') as f: train_set, _, test_set = 
pickle.load(f, encoding='latin1') self.x_train, y_train = train_set", "model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1])", "ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 =", "import pickle import numpy as np import pandas as pd class Model: def", "= tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME')", "2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop =", "shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1,", "dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax", "strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32)", "self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 =", "self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128]", "self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32))", "ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128],", "layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second", "strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32)", "self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 =", "tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 =", "tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for", "# shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 =", "= tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4)", "self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64]", "+ self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_)", "ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] 
self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256],", "= tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket] #", "tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess = tf.compat.v1.Session() # Initialize", "Data: mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set = pickle.load(f,", "# activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1],", "reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32,", "dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1", "self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4", "f: train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test", "tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1", "# shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 =", "tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer", "dataset with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train,", "tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') #", "var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction", "class Model: def __init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb') as f:", "rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10],", "self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256,", "np import pandas as pd class Model: def __init__(self): # Data: mnist dataset", "= tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess = tf.compat.v1.Session() #", "= tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) #", "# conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4", "padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) #", "train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test =", "tf.compat.v1.get_variable(\"v9\", 
shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) +", "shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer", "self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3", "dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer", "'''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape:", "tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost", "self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction,", "dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout'''", "= len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket] # Gradients self.grads =", "for var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating", "self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits =", "self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10],", "self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x", "self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) #", "mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set = pickle.load(f, encoding='latin1')", "+ self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2", "self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket] # Gradients self.grads", "tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' #", "= tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) #", "& optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) #", "784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer'''", "[var.shape for var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For", "[5,5,64,128] self.w_conv3 = 
tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128],", "Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64]", "padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) #", "= tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket)", "shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer", "padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1],", "self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32)", "tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 =", "shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2,", "1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1", "layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits", "dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer", "tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\",", "'''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer =", "function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4)", "tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1", "self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 =", "= tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape:", "self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256]", "tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess", "shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function", "tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = 
tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2,", "= tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32)", "'''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape:", "= tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1)", "ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64],", "tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4", "activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "y_train = train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test)", "tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv", "= tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1)", "strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1,", "strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3,", "[5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64],", "layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third", "conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 =", "self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1],", "# conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3", "_, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test, y_test = test_set", "strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32)", "self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop", "shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\",", "shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation", "self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = 
tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' #", "self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 =", "rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512],", "# activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1],", "conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 =", "tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32],", "dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer", "= tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4,", "self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y =", "self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess = tf.compat.v1.Session()", "def __init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set, _,", "tf import pickle import numpy as np import pandas as pd class Model:", "shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\",", "padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024],", "= self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy =", "self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy", "+ self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape:", "activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer", "tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') #", "dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout'''", "= pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784])", "# Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var", 
"Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1))", "len(self.var_bucket) self.var_shape = [var.shape for var in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost,", "tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\",", "self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 =", "# shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 =", "self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess = tf.compat.v1.Session() # Initialize variables self.sess.run(tf.compat.v1.global_variables_initializer())", "10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3", "Model: def __init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set,", "[64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2,", "= tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 =", "tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3)", "= tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop,", "test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE):", "self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1 = tf.nn.relu(self.conv1 +", "tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' #", "layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1", "= tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss)", "# Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_,", "layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2", "layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2", "tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') #", "layer1''' self.w_fc1 = 
tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat", "tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First", "= tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob =", "self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction = tf.equal(tf.argmax(self.y,1),", "# CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image =", "self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 +", "tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer''' #", "tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss =", "self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1,", "as f: train_set, _, test_set = pickle.load(f, encoding='latin1') self.x_train, y_train = train_set self.x_test,", "shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation", "optimizer''' self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables", "in self.var_bucket] # Gradients self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket) # For evaluating self.prediction =", "self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat =", "__init__(self): # Data: mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set", "strides=[1,2,2,1], padding='SAME') '''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\",", "= tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) #", "padding='SAME') '''Third Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) #", "activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape", "= tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y = tf.nn.softmax(self.logits) '''Cost function & optimizer''' self.loss", "Conv layer''' # shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32]", "= 
tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv layer'''", "import numpy as np import pandas as pd class Model: def __init__(self): #", "'''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32)", "tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME') #", "layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4", "import pandas as pd class Model: def __init__(self): # Data: mnist dataset with", "self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2'''", "self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Second Conv", "# conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv1", "layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2", "self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape =", "self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2", "= train_set self.x_test, y_test = test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) #", "self.w_conv4, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 =", "[-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape: [5,5,1,32] self.w_conv1", "+ self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512,", "shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) +", "dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3 self.y", "self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32)", "Variables self.var_bucket = tf.compat.v1.trainable_variables() self.var_size = len(self.var_bucket) self.var_shape = [var.shape for var in", "conv layer self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 =", "tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x = tf.compat.v1.placeholder(tf.float32, [None, 784]) self.x_image = tf.reshape(self.x, [-1,28,28,1]) self.y_ =", "'''Second Conv layer''' # shape: [5,5,32,64] self.w_conv2 = tf.compat.v1.get_variable(\"v2\", shape=[5,5,32,64], dtype=tf.float32) # shape:", 
"shape=[5,5,32,64], dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer", "tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3", "shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation", "[5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32],", "strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4,", "= tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3", "self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 +", "self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv", "self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 = tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32)", "dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME') # activation layer", "tf.reshape(self.x, [-1,28,28,1]) self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10]) '''First Conv layer''' # shape: [5,5,1,32]", "shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob)", "tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4 = tf.compat.v1.get_variable(\"v6\",", "= pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\", reuse=tf.compat.v1.AUTO_REUSE): self.x =", "= tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2)", "tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32)", "layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME') # activation layer self.h_conv3 = tf.nn.relu(self.conv3", "tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv", "512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\", shape=[512], dtype=tf.float32) self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2", "= test_set self.y_train = pd.get_dummies(y_train) self.y_test = pd.get_dummies(y_test) # CNN model with tf.compat.v1.variable_scope(\"mnist\",", "= tf.compat.v1.get_variable(\"v6\", shape=[5,5,128,256], dtype=tf.float32) # shape: [256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) #", "# Data: mnist dataset with open('data/mnist.pkl', 'rb') as f: train_set, _, test_set =", "[128] self.b_conv3 = 
tf.compat.v1.get_variable(\"v5\", shape=[128], dtype=tf.float32) # conv layer self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3,", "= tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_) self.cost = tf.reduce_mean(self.loss) self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4) # Variables self.var_bucket =", "# shape: [5,5,1,32] self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 =", "self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024,", "+ self.b_conv2) self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Third Conv layer''' # shape:", "activation layer self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1) self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')", "self.w_fc2) + self.b_fc2 '''Dropout''' self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\",", "self.w_conv1 = tf.compat.v1.get_variable(\"v0\", shape=[5,5,1,32], dtype=tf.float32) # shape: [32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32)", "self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob) '''Softmax layer''' self.w_fc3 = tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3", "'''FC layer1''' self.w_fc1 = tf.compat.v1.get_variable(\"v8\", shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32)", "[32] self.b_conv1 = tf.compat.v1.get_variable(\"v1\", shape=[32], dtype=tf.float32) # conv layer self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1,", "dtype=tf.float32) # shape: [64] self.b_conv2 = tf.compat.v1.get_variable(\"v3\", shape=[64], dtype=tf.float32) # conv layer self.conv2", "self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1) '''Dropout''' self.keep_prob", "shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256]) self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1)", "'''Dropout''' self.keep_prob = tf.compat.v1.placeholder(tf.float32) self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\",", "Conv layer''' # shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128]", "= tf.compat.v1.get_variable(\"v12\", shape=[512, 10], dtype=tf.float32) self.b_fc3 = tf.compat.v1.get_variable(\"v13\", shape=[10], dtype=tf.float32) self.logits = tf.matmul(self.h_fc2_drop,", "# shape: [5,5,64,128] self.w_conv3 = tf.compat.v1.get_variable(\"v4\", shape=[5,5,64,128], dtype=tf.float32) # shape: [128] self.b_conv3 =", "tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob) '''FC layer2''' self.w_fc2 = tf.compat.v1.get_variable(\"v10\", shape=[1024, 512], dtype=tf.float32) self.b_fc2 = tf.compat.v1.get_variable(\"v11\",", "= tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer''' # shape: [5,5,128,256] self.w_conv4 =", "self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4) self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') 
'''FC layer1'''", "= tf.nn.relu(self.conv3 + self.b_conv3) self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') '''Forth Conv layer'''", "[256] self.b_conv4 = tf.compat.v1.get_variable(\"v7\", shape=[256], dtype=tf.float32) # conv layer self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4,", "1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost) # Create session self.sess =", "shape=[2*2*256, 1024], dtype=tf.float32) self.b_fc1 = tf.compat.v1.get_variable(\"v9\", shape=[1024], dtype=tf.float32) self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256])", "evaluating self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32)) self.train_step = self.optimizer.minimize(self.cost)", "tensorflow as tf import pickle import numpy as np import pandas as pd" ]
[ "for i, n in enumerate(D): yield ( Node(i, speak_str=i, speak_voice=\"en\"), Node(n, speak_str=n, speak_voice=\"de\"),", "D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D): yield ( Node(i,", "<filename>tutorial/de.digits.mg/graph.py from mg.graph import Node D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n", "['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D): yield ( Node(i, speak_str=i, speak_voice=\"en\"),", "= ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D): yield ( Node(i, speak_str=i,", "graph(): for i, n in enumerate(D): yield ( Node(i, speak_str=i, speak_voice=\"en\"), Node(n, speak_str=n,", "import Node D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D): yield", "from mg.graph import Node D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in", "i, n in enumerate(D): yield ( Node(i, speak_str=i, speak_voice=\"en\"), Node(n, speak_str=n, speak_voice=\"de\"), )", "mg.graph import Node D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D):", "Node D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun'] def graph(): for i, n in enumerate(D): yield (", "def graph(): for i, n in enumerate(D): yield ( Node(i, speak_str=i, speak_voice=\"en\"), Node(n," ]
[ "), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')),", "], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), ( [ pd.Series(", "index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int': [1, 2], 'str':", "num_workers=3, ) assert audformat.index_type(frame) == define.IndexType.FILEWISE pd.testing.assert_frame_equal( pytest.DB[table_id].get().reset_index(drop=True), frame.reset_index(drop=True), ) files = frame.index.get_level_values(define.IndexField.FILE).values", "utils.hash(obj) == expected assert utils.hash(obj[::-1]) == expected @pytest.mark.parametrize( 'objs, expected', [ ( [],", "name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index(", "False, None, None, audformat.segmented_index(), ), # allow nat ( audformat.filewise_index(pytest.DB.files[:2]), True, None, None,", "None, audformat.filewise_index(['f1', 'f2.mp3']), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '', None, audformat.filewise_index(['f1', 'f2']), ), (", "# combine series and overwrite values ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ],", "['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1',", "['f1', 'f1'], [0, 0], [None, 1], ), ), ), # combine series and", "= audformat.Scheme(labels={'a': [1, 2], 'b': [3]}) audformat.utils.join_schemes([db1, db2], 'scheme_id') assert db1.schemes['scheme_id'] == expected", "extension, pattern, expected_index', [ ( audformat.filewise_index(), 'mp3', None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3',", "1}], {'a': 0, 'b': 1}, ), ( [{'a': 0}, {'b': 1, 'c': 2}],", "pytest.param( StringIO('''value 0.0 1.0 2.0'''), None, marks=pytest.mark.xfail(raises=ValueError) ) ]) def test_read_csv(csv, result): obj", "), pytest.param( 'Bad language', None, marks=pytest.mark.xfail(raises=ValueError) ) ] ) def test_map_language(language, expected): assert", "pytest.param( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), ], )", "'f2']), ], audformat.filewise_index(['f1', 'f2']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']),", "audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series(", "], audformat.segmented_index( ['f1', 'f2', 'f2', 'f3'], [0, 0, 1, 1], [1, 1, 2,", "1, pd.NaT, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ),", "'f2']), dtype='Int64', ), ), # combine series with different names ( [ pd.Series([1.],", "'f2', 'f3']), columns=['value1', 'value2'], ), ), ( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0,", "'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), 
audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ),", "), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f2')), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),", "2], ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT], ), False, {", "audformat import utils from audformat import define @pytest.mark.parametrize( 'objs, overwrite, expected', [ #", "output folder was created and can be removed if os.path.exists(output_folder): shutil.rmtree(output_folder) else: if", "expected_duration', [ ( audformat.segmented_index(), pd.Timedelta(0, unit='s'), ), ( audformat.segmented_index(['f1'], [0], [2]), pd.Timedelta(2, unit='s'),", "[np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1',", "'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')),", "[1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1',", "), pytest.param( 'xxx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'Bad language', None, marks=pytest.mark.xfail(raises=ValueError) ) ]", "pytest.FILE_DUR * 2], ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT], ),", "['1s', '3s'], ['2s', '4s'], ), '.', audformat.segmented_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ], ['1s', '3s'],", "[ # empty ( [], False, pd.Series([], audformat.filewise_index(), dtype='object'), ), ( [pd.Series([], audformat.filewise_index(),", "audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f3'],", "( audformat.filewise_index(), False, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), True, None, None, audformat.segmented_index(),", "dtype='int64', ), pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ], False, pd.Series( [1,", "( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2']),", "and not allow_nat: # for filewise tables we expect a duration for every", "audformat.filewise_index('f3'), ], audformat.filewise_index(), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(),", "def test_join_labels(labels, expected): assert utils.join_labels(labels) == expected def test_join_schemes(): # Empty list audformat.utils.join_schemes([],", "['a', 'b', 'c'], ), ( (['a'], ['a']), ['a'], ), ( [{'a': 0}], {'a':", "audformat.filewise_index(pytest.DB.files[:2]), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index(pytest.DB.files[:2]), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ),", "), ( (['a'], ['b', 'c']), ['a', 'b', 'c'], ), ( (['a'], ['a']), ['a'],", "audformat.filewise_index(), dtype=float), pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ),", "'f2', 'f3']), ) ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']),", "[0.0, 1.0, 2.0], 'value2': ['a', 'b', 'c'], }, index=audformat.filewise_index(['f1', 'f2', 'f3']), columns=['value1', 'value2'],", 
"audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']),", "[1.], audformat.filewise_index('f1'), name='c1'), pd.Series( [2.], audformat.segmented_index('f1', 0, 1), name='c2', ), ], False, pd.DataFrame(", "None, None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # series and frame ( pd.Series( [1, 2], index=audformat.filewise_index(pytest.DB.files[:2]),", "] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')),", "for file in files: file = os.path.join(root, file) assert file in files_duration @pytest.mark.parametrize(", "combine series with non-nullable dtype ( [ pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), pd.Series([1, 2],", "audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame( { 'c1': [1.], 'c2': [2.],", "'c1': [1., 2.], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [", "isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected) else: pd.testing.assert_frame_equal(obj, expected) @pytest.mark.parametrize( 'obj, expected_duration', [ ( audformat.segmented_index(),", "NaT, but file is missing pytest.param( audformat.segmented_index(['f1'], [0]), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), ] )", "), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int': [1, 2], 'str': ['a',", "index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series(", "{ 'value1': [0.0, 1.0, 2.0], 'value2': ['a', 'b', 'c'], }, index=audformat.filewise_index(['f1', 'f2', 'f3']),", "values ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], True, pd.Series([1.], audformat.filewise_index('f1')), ), (", "audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ), ( audformat.filewise_index(['f1', 'f2']),", "= audformat.Database('db1') scheme1 = audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] = scheme1 audformat.utils.join_schemes([db1], 'scheme_id') assert", "test_replace_file_extension(index, extension, pattern, expected_index): index = audformat.utils.replace_file_extension( index, extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index)", "('en', 'eng'), ('en', 'eng'), ('english', 'eng'), ('English', 'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ),", "assert utils.map_language(language) == expected @pytest.mark.parametrize('csv,result', [ ( StringIO('''file f1 f2 f3'''), pd.Index( ['f1',", "'0s', '0s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02", "dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False,", "pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index(pytest.DB.files[:2]), False, None,", "'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3', None, audformat.filewise_index(['f1', 'f2.mp3']),", 
"audformat import define @pytest.mark.parametrize( 'objs, overwrite, expected', [ # empty ( [], False,", "to `root` files = [os.path.join(pytest.DB_ROOT, f) for f in files] assert all(os.path.exists(f) for", "3]}, audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), ), '-103439349488189352', ), ] ) def", "[0, 0], [1, 1]), ), '-103439349488189352', ), ] ) def test_hash(obj, expected): assert", "database db3 = audformat.Database('db3') scheme3 = audformat.Scheme(labels={'a': [4]}) db3.schemes['scheme_id'] = scheme3 expected =", "['a', 'b', 'c'], }, index=audformat.filewise_index(['f1', 'f2', 'f3']), columns=['value1', 'value2'], ), ), ( StringIO('''file,start,value", "audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']),", "2], 'b': [3]}) audformat.utils.join_schemes([db1, db2], 'scheme_id') assert db1.schemes['scheme_id'] == expected assert db2.schemes['scheme_id'] ==", "pd.NaT], ), ), # forbid nat ( audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2],", "audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected', [ ('en', 'eng'), ('en', 'eng'), ('english', 'eng'),", "'f3']), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'),", "( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ],", "'f2', 'f3', 'f4']), ), ), # error: dtypes do not match pytest.param( [", "f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'],", "), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT], ), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):", "starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'], ), ), pytest.param( StringIO('''value", "'f2'], [0, 0], [1, 1]), '-3831446135233514455', ), ( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])), '-8245754232361677810',", "[1, 1], [2, 2]), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(),", "= files[mask] for file in files: file = os.path.join(root, file) assert file in", "audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f1'), audeer.safe_path(os.path.join('some', 'where')) + os.path.sep +", "audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected', [ ('en', 'eng'), ('en',", "), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ] ) def test_to_segmented_index(obj, allow_nat, files_duration, root,", "audformat.filewise_index('f1')), ], False, pd.Series([np.nan], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ],", "[4], 'b': [3]}) audformat.utils.join_schemes([db1, db2, db3], 'scheme_id') # Fail for schemes without labels", "name='c2'), ], False, pd.DataFrame( { 'c1': [1., np.nan], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1',", "'f3']), dtype='category', ), ], False, 
pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category',", "expected', [ # empty ( [], False, pd.Series([], audformat.filewise_index(), dtype='object'), ), ( [pd.Series([],", "pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), # combine values in", "0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index(", "], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series(", "( [{'a': 0}], {'a': 0}, ), ( [{'a': 0}, {'b': 1}], {'a': 0,", "pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0], [pytest.FILE_DUR, pytest.FILE_DUR * 2],", "pd.Series([], audformat.segmented_index(), dtype='object') ), ( [pd.DataFrame([], audformat.segmented_index(), dtype='object')], False, pd.DataFrame([], audformat.segmented_index(), dtype='object') ),", "), name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0],", "'f3'], [0, 0], [1, 1]), ], audformat.segmented_index( ['f1', 'f2', 'f3'], [0, 0, 0],", "), ), ( [ pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f2'), name='c2'), ], False, pd.DataFrame(", "2}, ), ( [{'a': 0, 'b': 1}, {'b': 2, 'c': 2}], {'a': 0,", "# Three database db3 = audformat.Database('db3') scheme3 = audformat.Scheme(labels={'a': [4]}) db3.schemes['scheme_id'] = scheme3", "), ( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])), '-8245754232361677810', ), ( pd.DataFrame( {'a': [0, 1],", "pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'),", "'c'], }, audformat.segmented_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c1': [1., 2., 3.,", "'f3'], [0, 0, 0], [1, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'],", "audformat.utils.join_schemes([db1], 'scheme_id') assert db1.schemes['scheme_id'] == scheme1 # Two databases db2 = audformat.Database('db2') scheme2", "@pytest.mark.parametrize( 'index, root, expected', [ ( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None,", "), ), # provide file durations ( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR", "( audformat.filewise_index(), 'mp3', None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ), (", "@pytest.mark.parametrize( 'index, extension, pattern, expected_index', [ ( audformat.filewise_index(), 'mp3', None, audformat.filewise_index(), ), (", "not match pytest.param( [ pd.Series([1], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ),", "[ audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(), audformat.filewise_index(), ], audformat.filewise_index(), ), (", "<gh_stars>1-10 from io import StringIO import os import shutil import numpy as np", "result): obj = audformat.utils.read_csv(csv) if isinstance(result, pd.Index): pd.testing.assert_index_equal(obj, result) elif isinstance(result, pd.Series): pd.testing.assert_series_equal(obj,", "), ), # error: dtypes do not match pytest.param( [ pd.Series([1], audformat.filewise_index('f1')), 
pd.Series([1.],", "audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'),", "pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1', ), pd.Series( [2.], audformat.filewise_index('f2'), name='c2', ), ],", "[ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(),", "'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index('f2',", "), ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series(", "'f2.mp3']), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '', None, audformat.filewise_index(['f1', 'f2']), ), ( audformat.filewise_index(['f1.ogg', 'f2.wav']),", "0}, ), ( [{'a': 0}, {'b': 1}], {'a': 0, 'b': 1}, ), (", "[0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index( ['f1',", "pd.Series( [1, 2], index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int':", "1], 'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), ), '-103439349488189352', ),", "'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(['f1',", "], audformat.segmented_index( ['f1', 'f1', 'f2', 'f3'], [0, 0, 0, 0], [pd.NaT, 1, 1,", "audformat.filewise_index(), False, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), True, None, None, audformat.segmented_index(), ),", "['c']], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [['a', 'b'], ['b', 'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ),", "2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]), ), name='value',", "db3.schemes['scheme_id'] = scheme3 expected = audformat.Scheme(labels={'a': [4], 'b': [3]}) audformat.utils.join_schemes([db1, db2, db3], 'scheme_id')", "'f2', 'f4']), name='c2', ), pd.DataFrame( { 'c1': [np.nan, 3.], 'c2': ['b', 'c'], },", "pd.Series([1.], audformat.segmented_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [", "'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']),", "), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b f2,00:00:02,00:00:03,2.0,c'''), pd.DataFrame( { 'value1': [0.0, 1.0, 2.0],", "'str': ['a', 'b']}, index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ] ) def", "'f2', 'f3']), ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), )", "pytest.DB.files[:2], [0.1, 0.5], [0.2, pytest.FILE_DUR], ), ), # provide file durations ( audformat.filewise_index(pytest.DB.files[:2]),", "dtype='object') ), ( [pd.DataFrame([], audformat.segmented_index(), dtype='object')], False, pd.DataFrame([], audformat.segmented_index(), dtype='object') ), # combine", "'c': 2}, ), ( [{'a': 0, 'b': 1}, {'b': 2, 'c': 2}], {'a':", "1.0, 2.0], 
index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ),", "pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), ( [ pd.Series( [1., 2.],", "@pytest.mark.parametrize( 'objs, expected', [ ( [], audformat.filewise_index(), ), ( [ audformat.filewise_index(), ], audformat.filewise_index(),", "0.5], [0.2, pd.NaT], ), ), # forbid nat ( audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT,", "1]), ), '-103439349488189352', ), ] ) def test_hash(obj, expected): assert utils.hash(obj) == expected", "file # for segmented only where end == NaT files = result.get_level_values(audformat.define.IndexField.FILE) if", "'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]), ], audformat.segmented_index(),", "audeer.safe_path('f2'), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where',", ") def test_hash(obj, expected): assert utils.hash(obj) == expected assert utils.hash(obj[::-1]) == expected @pytest.mark.parametrize(", "1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'],", "audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0, 0,", "audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.segmented_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.],", "audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.],", "audformat.filewise_index(['f1', 'f2.mp3']), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '', None, audformat.filewise_index(['f1', 'f2']), ), ( audformat.filewise_index(['f1.ogg',", "audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ),", "1.0, 2.0], index=audformat.filewise_index(['f1', 'f2', 'f3']), name='value', ), ), ( StringIO('''file,value1,value2 f1,0.0,a f2,1.0,b f3,2.0,c'''),", "audformat.utils.intersect(objs), expected, ) @pytest.mark.parametrize( 'labels, expected', [ ( [], [], ), ( (['a'],", "def test_map_language(language, expected): assert utils.map_language(language) == expected @pytest.mark.parametrize('csv,result', [ ( StringIO('''file f1 f2", "audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f2'), ] ), ), ( audformat.segmented_index( ['f1', 'f2'],", "None, audformat.segmented_index(), ), ( audformat.segmented_index(), True, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), False,", "0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0,", "], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [", "starts=['0s', '1s', '2s'], ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]), ), name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0", "), ), # forbid nat ( audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0,", "2.0], 'value2': ['a', 'b', 'c'], }, 
index=audformat.filewise_index(['f1', 'f2', 'f3']), columns=['value1', 'value2'], ), ),", "'f2']), dtype='float64', ), ], False, pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ),", "files = files[mask] for file in files: file = os.path.join(root, file) assert file", "( audformat.segmented_index(['f1'], [0.1], [2]), pd.Timedelta(1.9, unit='s'), ), ( audformat.segmented_index(['f1', 'f2'], [0, 1], [2,", "file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files] assert file_names == expected_file_names #", "), ), ( audformat.segmented_index( ['f1', 'f2'], ['1s', '3s'], ['2s', '4s'], ), '.', audformat.segmented_index(", "[1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "'value2'], ), ), ( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index(", "audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2',", "expected', [ ( [], audformat.filewise_index(), ), ( [ audformat.filewise_index(), ], audformat.filewise_index(), ), (", "), ( [ pd.Series( 1, audformat.filewise_index('f1'), dtype='int64', ), pd.Series( 2, audformat.filewise_index('f2'), dtype='int64', ),", ") ), # combine series with non-nullable dtype ( [ pd.Series([1, 2], audformat.filewise_index(['f1',", "if not isinstance(result, pd.Index): result = result.index pd.testing.assert_index_equal(result, expected) if files_duration and not", "}, index=audformat.filewise_index(['f1', 'f2', 'f3']), columns=['value1', 'value2'], ), ), ( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''),", "dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'), dtype='bool', ), ], False, pd.Series( True, audformat.filewise_index(['f1', 'f2']),", "# series and frame ( pd.Series( [1, 2], index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None,", "[2]), pd.Timedelta(1.9, unit='s'), ), ( audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]), pd.Timedelta(3, unit='s'),", "'2s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b f2,00:00:02,00:00:03,2.0,c'''),", "), ( [{'a': 0}, {'b': 1}], {'a': 0, 'b': 1}, ), ( [{'a':", "False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5],", "audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED: mask = result.get_level_values( audformat.define.IndexField.END ) == pd.NaT files = files[mask]", "audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index(['f1',", "audformat.filewise_index(['f1', 'f2']), ], audformat.filewise_index(['f1', 'f2']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2',", "), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where',", "0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ),", "[1, 2], 'b': [3]}) audformat.utils.join_schemes([db1, db2], 'scheme_id') assert 
db1.schemes['scheme_id'] == expected assert db2.schemes['scheme_id']", "0, 'b': 1}, ['c']], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [['a', 'b'], ['b', 'c'], 'd'],", "missing pytest.param( audformat.segmented_index(['f1'], [0]), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), ] ) def test_duration(obj, expected_duration): duration", "expected_duration @pytest.mark.parametrize( 'index, root, expected', [ ( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(),", "'output_folder,table_id,expected_file_names', [ pytest.param( '.', 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( os.path.abspath(''), 'segments', None, marks=pytest.mark.xfail(raises=ValueError)", "pd.NaT, pd.NaT]), ), name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0,", "( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2],", "audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ), ] ) def test_intersect(objs,", "pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ) ), # combine series", "'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'),", "2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ),", "], True, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')), ], True,", "[ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0],", "pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [", "'f3', 'f4']), ), ), # error: dtypes do not match pytest.param( [ pd.Series([1],", "), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series(", "for segmented only where end == NaT files = result.get_level_values(audformat.define.IndexField.FILE) if audformat.index_type(obj) ==", "{ 'c': [2., 3.] 
}, audformat.filewise_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c':", "audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ), ( [ audformat.segmented_index(['f1',", "False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series( True,", "False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), # combine series with", "], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [", "audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0,", "'f2'])), '-8245754232361677810', ), ( pd.DataFrame( {'a': [0, 1], 'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'],", "0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index('f2', 0, 1),", "1], [2, 2]), ], audformat.segmented_index( ['f1', 'f2', 'f2', 'f3'], [0, 0, 1, 1],", "pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ),", "pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), ( [ pd.Series( ['a', 'b',", "without labels with pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize(", "[ pytest.param( '.', 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( os.path.abspath(''), 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ),", "], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1',", "= audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected', [ ('en', 'eng'), ('en', 'eng'), ('english',", "f2,1.0,b f3,2.0,c'''), pd.DataFrame( { 'value1': [0.0, 1.0, 2.0], 'value2': ['a', 'b', 'c'], },", "], False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2',", "StringIO('''file,value1,value2 f1,0.0,a f2,1.0,b f3,2.0,c'''), pd.DataFrame( { 'value1': [0.0, 1.0, 2.0], 'value2': ['a', 'b',", "1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0,", "audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), # combine series and overwrite", "expected) if files_duration and not allow_nat: # for filewise tables we expect a", "test_expand_file_path(tmpdir, index, root, expected): expanded_index = audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index, expected) @pytest.mark.parametrize( 'obj, expected',", "'f3'], [0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, 1], ), ),", "isinstance(result, pd.Series): pd.testing.assert_series_equal(obj, result) else: pd.testing.assert_frame_equal(obj, result) @pytest.mark.parametrize( 'index, extension, pattern, expected_index', [", "'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a',", 
"audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), '-3831446135233514455', ), ( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])),", "}, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series( [1.], audformat.filewise_index('f1'), name='c1'), pd.Series( [2.],", "as pd import pytest import audeer import audformat from audformat import utils from", "[1], [2]), dtype='category', ), pd.Timedelta(1, unit='s'), ), ( pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])), pd.Timedelta(1, unit='s'),", "None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ),", "isinstance(result, pd.Index): pd.testing.assert_index_equal(obj, result) elif isinstance(result, pd.Series): pd.testing.assert_series_equal(obj, result) else: pd.testing.assert_frame_equal(obj, result) @pytest.mark.parametrize(", "pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f2')), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [", "pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pytest.FILE_DUR], ), ), # provide file durations", "[pd.Series([], audformat.filewise_index(), dtype='object')], False, pd.Series([], audformat.filewise_index(), dtype='object') ), ( [pd.Series([], audformat.segmented_index(), dtype='object')], False,", "['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ) ), ( [ pd.Series( ['a', 'b',", "None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index(pytest.DB.files[:2]), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index( pytest.DB.files[:2],", "), ], False, pd.Series( True, audformat.filewise_index(['f1', 'f2']), dtype='boolean', ), ), ( [ pd.Series(", "), ( (['a'], ['a']), ['a'], ), ( [{'a': 0}], {'a': 0}, ), (", "ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b f2,00:00:02,00:00:03,2.0,c'''), pd.DataFrame(", "[ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [", "['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a', 'b', 'a'],", "index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), # error: values", "i in range(1, 101)] ) ] ) def test_to_filewise(output_folder, table_id, expected_file_names): has_existed =", "), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3', None, audformat.filewise_index(['f1', 'f2.mp3']), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '',", "audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(['f1', 'f2']), ),", "audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1',", "{ 'value1': [0.0, 1.0, 2.0], 'value2': ['a', 'b', 'c'], }, index=audformat.segmented_index( ['f1', 'f1',", "), ( audformat.segmented_index(), '0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']),", "], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2'], 
[0, 0], [1, 1]), audformat.segmented_index(['f2',", "obj, allow_nat=allow_nat, files_duration=files_duration, root=root, ) if not isinstance(result, pd.Index): result = result.index pd.testing.assert_index_equal(result,", "shutil import numpy as np import pandas as pd import pytest import audeer", "'f2')), ] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), audeer.safe_path(os.path.join('some', 'where')),", "pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), pd.Series([1., 2.],", "[ pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ), pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']),", "[ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b',", "( audformat.segmented_index(), None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'),", "dtype=float), pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), (", "'f2']), ), ( audformat.filewise_index(['f1.ogg', 'f2.wav']), 'mp3', '.ogg', audformat.filewise_index(['f1.mp3', 'f2.wav']), ), ] ) def", "audformat.filewise_index(['f1', 'f2', 'f3']), ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1',", "audformat.filewise_index(['f1']), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # segmented index with NaT, but file is missing", "1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]), ),", "audformat.segmented_index(), ), ( audformat.filewise_index(), False, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), True, None,", "in range(1, 101)] ) ] ) def test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder)", ") pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root, expected', [ # empty (", "{'int': [1, 2], 'str': ['a', 'b']}, index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ),", "index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'],", "), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1', ), pd.Series( ['a',", "[1., 2.], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series(", "), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''), pd.MultiIndex.from_arrays( [ ['f1', 'f1', 'f2'], pd.to_timedelta(['0s', '1s',", ") ] ) def test_expand_file_path(tmpdir, index, root, expected): expanded_index = audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index,", "0], [pd.NaT, 1, pd.NaT, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0,", "files = result.get_level_values(audformat.define.IndexField.FILE) if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED: mask = result.get_level_values( audformat.define.IndexField.END ) ==", "'f2']), ), ), ( [ pd.Series( [1.], audformat.filewise_index('f1'), name='c1'), pd.Series( [2.], audformat.segmented_index('f1', 0,", "[ audformat.filewise_index(['f1', 
'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index('f2'), ), ( [ audformat.filewise_index(['f1',", "False, pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), ( [ pd.Series( [1.,", "[1., 2., 3.], }, audformat.filewise_index(['f1', 'f2', 'f3']), ), ), ( [ pd.Series( [1.,", "1]), ], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "'where', 'f2')), ] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), audeer.safe_path(os.path.join('some',", "[0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f3'], [0, 0,", ") ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ),", "( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ],", "{'a': 0, 'b': 1}, ), ( [{'a': 0}, {'b': 1, 'c': 2}], {'a':", "scheme3 = audformat.Scheme(labels={'a': [4]}) db3.schemes['scheme_id'] = scheme3 expected = audformat.Scheme(labels={'a': [4], 'b': [3]})", "import numpy as np import pandas as pd import pytest import audeer import", "marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ),", "), ( [pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(), dtype='object') ), ( [pd.DataFrame([], audformat.segmented_index(),", "{ 'c1': [1., np.nan], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), (", "audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1,", "ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'], ), ), pytest.param( StringIO('''value 0.0 1.0 2.0'''),", "False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ],", "file) assert file in files_duration @pytest.mark.parametrize( 'output_folder,table_id,expected_file_names', [ pytest.param( '.', 'segments', None, marks=pytest.mark.xfail(raises=ValueError)", "audeer.safe_path('f2'), ] ), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f1'),", "audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "], False, pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), ( [ pd.Series(", "marks=pytest.mark.xfail(raises=ValueError) ), ( 'tmp', 'segments', [ str(i).zfill(3) + f'_{j}' for i in range(1,", "( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])), '-8245754232361677810', ), ( pd.DataFrame( {'a': [0, 1], 'b':", "'f1', 'f2'], pd.to_timedelta(['0s', '1s', '2s']), pd.to_timedelta(['1s', '2s', '3s']), ], names=['file', 'start', 'end'], ),", "['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'c'], index=audformat.filewise_index(['f1',", "dtype='float64', ), ], False, pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ), (", "= audformat.Scheme(labels={'b': [3]}) 
db2.schemes['scheme_id'] = scheme2 expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]})", "# segmented index with NaT, but file is missing pytest.param( audformat.segmented_index(['f1'], [0]), None,", "1, 'c': 2}, ), ( [{'a': 0, 'b': 1}, {'b': 2, 'c': 2}],", "), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(),", "audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')), ], True, pd.Series([2.], audformat.filewise_index('f1')), ), # combine values with matching", "f1,00:00:01,00:00:02,1.0 f2,00:00:02,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'],", "in same location ( [ pd.Series([np.nan], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], False, pd.Series([np.nan], audformat.filewise_index('f1')),", "index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category',", "pd.DataFrame( { 'c1': [1., np.nan], 'c2': [np.nan, 2.], }, audformat.segmented_index( ['f1', 'f1'], [0,", "'scheme_id') # Fail for schemes without labels with pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id']", "] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index(", "marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( [1, 2, 3], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series(", "audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.],", "series with same name ( [ pd.Series([], audformat.filewise_index(), dtype=float), pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),", "'where', 'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index(", "), ( 'tmp', 'files', [str(i).zfill(3) for i in range(1, 101)] ) ] )", "pd.Series( True, audformat.filewise_index(['f1', 'f2']), dtype='boolean', ), ), ( [ pd.Series( 1, audformat.filewise_index('f1'), dtype='int64',", "pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected', [", "2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ),", "), '.', audformat.segmented_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ], ['1s', '3s'], ['2s', '4s'], ), )", "( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), # combine", "# One database db1 = audformat.Database('db1') scheme1 = audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] =", "{ 'c1': [1., np.nan], 'c2': [np.nan, 2.], }, audformat.segmented_index( ['f1', 'f1'], [0, 0],", "audformat.filewise_index('f2'), dtype='int64', ), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ),", "), ( pd.DataFrame( {'a': [0, 1], 'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'], [0, 0],", "pd.Index( ['f1', 'f2', 'f3'], name='file', ), 
from io import StringIO
import os
import shutil

import numpy as np
import pandas as pd
import pytest

import audeer
import audformat
from audformat import utils
from audformat import define


@pytest.mark.parametrize(
    'objs, overwrite, expected',
    [
        # empty
        (
            [],
            False,
            pd.Series([], audformat.filewise_index(), dtype='object'),
        ),
        (
            [pd.Series([], audformat.filewise_index(), dtype='object')],
            False,
            pd.Series([], audformat.filewise_index(), dtype='object'),
        ),
        (
            [pd.Series([], audformat.segmented_index(), dtype='object')],
            False,
            pd.Series([], audformat.segmented_index(), dtype='object'),
        ),
        (
            [pd.DataFrame([], audformat.segmented_index(), dtype='object')],
            False,
            pd.DataFrame([], audformat.segmented_index(), dtype='object'),
        ),
        # combine series with same name
        (
            [
                pd.Series([], audformat.filewise_index(), dtype=float),
                pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
            ],
            False,
            pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f2')),
            ],
            False,
            pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
        ),
        (
            [
                pd.Series([1.], audformat.segmented_index('f1')),
                pd.Series([2.], audformat.segmented_index('f2')),
            ],
            False,
            pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.segmented_index('f2')),
            ],
            False,
            pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),
        ),
        # combine values in same location
        (
            [
                pd.Series([np.nan], audformat.filewise_index('f1')),
                pd.Series([np.nan], audformat.filewise_index('f1')),
            ],
            False,
            pd.Series([np.nan], audformat.filewise_index('f1')),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([np.nan], audformat.filewise_index('f1')),
            ],
            False,
            pd.Series([1.], audformat.filewise_index('f1')),
        ),
        # combine series and overwrite values
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([np.nan], audformat.filewise_index('f1')),
            ],
            True,
            pd.Series([1.], audformat.filewise_index('f1')),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f1')),
            ],
            True,
            pd.Series([2.], audformat.filewise_index('f1')),
        ),
        # combine values with matching dtype
        (
            [
                pd.Series(
                    [1., 2.],
                    audformat.filewise_index(['f1', 'f2']),
                    dtype='float32',
                ),
                pd.Series(
                    [1., 2.],
                    audformat.filewise_index(['f1', 'f2']),
                    dtype='float64',
                ),
            ],
            False,
            pd.Series(
                [1., 2.],
                audformat.filewise_index(['f1', 'f2']),
                dtype='float64',
            ),
        ),
        (
            [
                pd.Series(True, audformat.filewise_index('f1'), dtype='bool'),
                pd.Series(True, audformat.filewise_index('f2'), dtype='bool'),
            ],
            False,
            pd.Series(
                True,
                audformat.filewise_index(['f1', 'f2']),
                dtype='boolean',
            ),
        ),
        (
            [
                pd.Series(1, audformat.filewise_index('f1'), dtype='int64'),
                pd.Series(2, audformat.filewise_index('f2'), dtype='int64'),
            ],
            False,
            pd.Series(
                [1, 2],
                audformat.filewise_index(['f1', 'f2']),
                dtype='Int64',
            ),
        ),
        # combine series with non-nullable dtype
        (
            [
                pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),
                pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),
            ],
            False,
            pd.Series(
                [1, 2],
                audformat.filewise_index(['f1', 'f2']),
                dtype='Int64',
            ),
        ),
        # combine series with different names
        (
            [
                pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
                pd.Series([2.], audformat.filewise_index('f1'), name='c2'),
            ],
            False,
            pd.DataFrame(
                {'c1': [1.], 'c2': [2.]},
                audformat.filewise_index('f1'),
            ),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
                pd.Series([2.], audformat.filewise_index('f2'), name='c2'),
            ],
            False,
            pd.DataFrame(
                {'c1': [1., np.nan], 'c2': [np.nan, 2.]},
                audformat.filewise_index(['f1', 'f2']),
            ),
        ),
        (
            [
                pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
                pd.Series(
                    [2.],
                    audformat.segmented_index('f1', 0, 1),
                    name='c2',
                ),
            ],
            False,
            pd.DataFrame(
                {'c1': [1., np.nan], 'c2': [np.nan, 2.]},
                audformat.segmented_index(['f1', 'f1'], [0, 0], [None, 1]),
            ),
        ),
        # combine series and data frame
        (
            [
                pd.Series(
                    [1., 2.],
                    audformat.filewise_index(['f1', 'f2']),
                    name='c',
                ),
                pd.DataFrame(
                    {'c': [2., 3.]},
                    audformat.filewise_index(['f2', 'f3']),
                ),
            ],
            False,
            pd.DataFrame(
                {'c': [1., 2., 3.]},
                audformat.filewise_index(['f1', 'f2', 'f3']),
            ),
        ),
        # error: dtypes do not match
        pytest.param(
            [
                pd.Series([1], audformat.filewise_index('f1')),
                pd.Series([1.], audformat.filewise_index('f1')),
            ],
            False,
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [
                pd.Series(
                    [1, 2, 3],
                    index=audformat.filewise_index(['f1', 'f2', 'f3']),
                ),
                pd.Series(
                    ['a', 'b', 'a'],
                    index=audformat.filewise_index(['f1', 'f2', 'f3']),
                    dtype='category',
                ),
            ],
            False,
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        # error: values do not match
        pytest.param(
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f1')),
            ],
            False,
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_concat(objs, overwrite, expected):
    obj = utils.concat(objs, overwrite=overwrite)
    if isinstance(obj, pd.Series):
        pd.testing.assert_series_equal(obj, expected)
    else:
        pd.testing.assert_frame_equal(obj, expected)


@pytest.mark.parametrize(
    'obj, expected_duration',
    [
        (
            audformat.segmented_index(),
            pd.Timedelta(0, unit='s'),
        ),
        (
            audformat.segmented_index(['f1'], [0], [2]),
            pd.Timedelta(2, unit='s'),
        ),
        (
            audformat.segmented_index(['f1'], [0.1], [2]),
            pd.Timedelta(1.9, unit='s'),
        ),
        (
            audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]),
            pd.Timedelta(3, unit='s'),
        ),
        (
            pd.Series(
                index=audformat.segmented_index(['f1'], [1], [2]),
                dtype='category',
            ),
            pd.Timedelta(1, unit='s'),
        ),
        (
            pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])),
            pd.Timedelta(1, unit='s'),
        ),
        # filewise index, but file is missing
        pytest.param(
            audformat.filewise_index(['f1']),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # segmented index with NaT, but file is missing
        pytest.param(
            audformat.segmented_index(['f1'], [0]),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
    ],
)
def test_duration(obj, expected_duration):
    duration = audformat.utils.duration(obj)
    if pd.isnull(expected_duration):
        assert pd.isnull(duration)
    else:
        assert duration == expected_duration


@pytest.mark.parametrize(
    'index, root, expected',
    [
        (
            audformat.filewise_index(),
            None,
            audformat.filewise_index(),
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            os.path.join('some', 'where'),
            audformat.filewise_index(
                [
                    audeer.safe_path(os.path.join('some', 'where', 'f1')),
                    audeer.safe_path(os.path.join('some', 'where', 'f2')),
                ]
            ),
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            audeer.safe_path(os.path.join('some', 'where')),
            audformat.filewise_index(
                [
                    audeer.safe_path(os.path.join('some', 'where'))
                    + os.path.sep + audeer.safe_path('f1'),
                    audeer.safe_path(os.path.join('some', 'where'))
                    + os.path.sep + audeer.safe_path('f2'),
                ]
            ),
        ),
        (
            audformat.segmented_index(
                ['f1', 'f2'],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
            '.',
            audformat.segmented_index(
                [audeer.safe_path('f1'), audeer.safe_path('f2')],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
        ),
    ],
)
def test_expand_file_path(tmpdir, index, root, expected):
    expanded_index = audformat.utils.expand_file_path(index, root)
    pd.testing.assert_index_equal(expanded_index, expected)


@pytest.mark.parametrize(
    'obj, expected',
    [
        (
            audformat.filewise_index(),
            '0',
        ),
        (
            audformat.segmented_index(),
            '0',
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            '-4231615416436839963',
        ),
        (
            audformat.segmented_index(['f1', 'f2']),
            '-2363261461673824215',
        ),
        (
            pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])),
            '-8245754232361677810',
        ),
    ],
)
def test_hash(obj, expected):
    assert utils.hash(obj) == expected
    assert utils.hash(obj[::-1]) == expected


@pytest.mark.parametrize(
    'objs, expected',
    [
        (
            [],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(),
                audformat.filewise_index(),
            ],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
            ],
            audformat.filewise_index(['f1', 'f2']),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index('f2'),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index('f3'),
            ],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.segmented_index(),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.segmented_index(),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2']),
                audformat.segmented_index(['f3', 'f4']),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.filewise_index(),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.filewise_index(),
                audformat.segmented_index(['f1', 'f2']),
            ],
            audformat.segmented_index(),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
            ],
            audformat.segmented_index('f2', 0, 1),
        ),
    ],
)
def test_intersect(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.intersect(objs),
        expected,
    )


@pytest.mark.parametrize(
    'labels, expected',
    [
        (
            [],
            [],
        ),
        (
            (['a'], ['b']),
            ['a', 'b'],
        ),
        (
            (['a'], ['b', 'c']),
            ['a', 'b', 'c'],
        ),
        (
            (['a'], ['a']),
            ['a'],
        ),
        (
            [{'a': 0}],
            {'a': 0},
        ),
        (
            [{'a': 0}, {'b': 1}],
            {'a': 0, 'b': 1},
        ),
        (
            [{'a': 0}, {'b': 1, 'c': 2}],
            {'a': 0, 'b': 1, 'c': 2},
        ),
        (
            [{'a': 0, 'b': 1}, {'b': 1, 'c': 2}],
            {'a': 0, 'b': 1, 'c': 2},
        ),
        (
            [{'a': 0, 'b': 1}, {'b': 2, 'c': 2}],
            {'a': 0, 'b': 2, 'c': 2},
        ),
        pytest.param(
            ['a', 'b', 'c'],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{'a': 0, 'b': 1}, ['c']],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [['a', 'b'], ['b', 'c'], 'd'],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{0: {'age': 20}}, {'0': {'age': 30}}],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_join_labels(labels, expected):
    assert utils.join_labels(labels) == expected


def test_join_schemes():
    # Empty list
    audformat.utils.join_schemes([], 'scheme_id')
    # One database
    db1 = audformat.Database('db1')
    scheme1 = audformat.Scheme(labels={'a': [1, 2]})
    db1.schemes['scheme_id'] = scheme1
    audformat.utils.join_schemes([db1], 'scheme_id')
    assert db1.schemes['scheme_id'] == scheme1
    # Two databases
    db2 = audformat.Database('db2')
    scheme2 = audformat.Scheme(labels={'b': [3]})
    db2.schemes['scheme_id'] = scheme2
    expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]})
    audformat.utils.join_schemes([db1, db2], 'scheme_id')
    assert db1.schemes['scheme_id'] == expected
    assert db2.schemes['scheme_id'] == expected
    # Three databases
    db3 = audformat.Database('db3')
    scheme3 = audformat.Scheme(labels={'a': [4]})
    db3.schemes['scheme_id'] = scheme3
    expected = audformat.Scheme(labels={'a': [4], 'b': [3]})
    audformat.utils.join_schemes([db1, db2, db3], 'scheme_id')
    # Fail for schemes without labels
    with pytest.raises(ValueError):
        db = audformat.Database('db')
        db.schemes['scheme_id'] = audformat.Scheme('str')
        audformat.utils.join_schemes([db], 'scheme_id')


@pytest.mark.parametrize(
    'language, expected',
    [
        ('en', 'eng'),
        pytest.param(
            'xxx', None, marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            'Bad language', None, marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_map_language(language, expected):
    assert utils.map_language(language) == expected


@pytest.mark.parametrize('csv,result', [
    (
        StringIO('''file
f1
f2
f3'''),
        pd.Index(['f1', 'f2', 'f3'], name='file'),
    ),
    (
        StringIO('''file,value
f1,0.0
f2,1.0
f3,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.filewise_index(['f1', 'f2', 'f3']),
            name='value',
        ),
    ),
    (
        StringIO('''file,value1,value2
f1,0.0,a
f2,1.0,b
f3,2.0,c'''),
        pd.DataFrame(
            {
                'value1': [0.0, 1.0, 2.0],
                'value2': ['a', 'b', 'c'],
            },
            index=audformat.filewise_index(['f1', 'f2', 'f3']),
            columns=['value1', 'value2'],
        ),
    ),
    (
        StringIO('''file,start,value
f1,00:00:00,0.0
f1,00:00:01,1.0
f2,00:00:02,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]),
            ),
            name='value',
        ),
    ),
    (
        StringIO('''file,end,value
f1,00:00:01,0.0
f1,00:00:02,1.0
f2,00:00:03,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '0s', '0s'],
                ends=['1s', '2s', '3s'],
            ),
            name='value',
        ),
    ),
    (
        StringIO('''file,start,end
f1,00:00:00,00:00:01
f1,00:00:01,00:00:02
f2,00:00:02,00:00:03'''),
        pd.MultiIndex.from_arrays(
            [
                ['f1', 'f1', 'f2'],
                pd.to_timedelta(['0s', '1s', '2s']),
                pd.to_timedelta(['1s', '2s', '3s']),
            ],
            names=['file', 'start', 'end'],
        ),
    ),
    (
        StringIO('''file,start,end,value
f1,00:00:00,00:00:01,0.0
f1,00:00:01,00:00:02,1.0
f2,00:00:02,00:00:03,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=['1s', '2s', '3s'],
            ),
            name='value',
        ),
    ),
    (
        StringIO('''file,start,end,value1,value2
f1,00:00:00,00:00:01,0.0,a
f1,00:00:01,00:00:02,1.0,b
f2,00:00:02,00:00:03,2.0,c'''),
        pd.DataFrame(
            {
                'value1': [0.0, 1.0, 2.0],
                'value2': ['a', 'b', 'c'],
            },
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=['1s', '2s', '3s'],
            ),
            columns=['value1', 'value2'],
        ),
    ),
    pytest.param(
        StringIO('''value
0.0
1.0
2.0'''),
        None,
        marks=pytest.mark.xfail(raises=ValueError),
    ),
])
def test_read_csv(csv, result):
    obj = audformat.utils.read_csv(csv)
    if isinstance(result, pd.Index):
        pd.testing.assert_index_equal(obj, result)
    elif isinstance(result, pd.Series):
        pd.testing.assert_series_equal(obj, result)
    else:
        pd.testing.assert_frame_equal(obj, result)


@pytest.mark.parametrize(
    'index, extension, pattern, expected_index',
    [
        (
            audformat.segmented_index(),
            'mp3',
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.filewise_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.segmented_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.segmented_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.filewise_index(['f1', 'f2.wv']),
            'mp3',
            None,
            audformat.filewise_index(['f1', 'f2.mp3']),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            '',
            None,
            audformat.filewise_index(['f1', 'f2']),
        ),
        (
            audformat.filewise_index(['f1.ogg', 'f2.wav']),
            'mp3',
            '.ogg',
            audformat.filewise_index(['f1.mp3', 'f2.wav']),
        ),
    ],
)
def test_replace_file_extension(index, extension, pattern, expected_index):
    index = audformat.utils.replace_file_extension(
        index,
        extension,
        pattern=pattern,
    )
    pd.testing.assert_index_equal(index, expected_index)


@pytest.mark.parametrize(
    'obj, allow_nat, files_duration, root, expected',
    [
        # empty
        (
            audformat.filewise_index(),
            True,
            None,
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.filewise_index(),
            False,
            None,
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.segmented_index(),
            True,
            None,
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.segmented_index(),
            False,
            None,
            None,
            audformat.segmented_index(),
        ),
        # allow nat
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        (
            audformat.segmented_index(pytest.DB.files[:2]),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT],
            ),
            True,
            None,
            None,
            audformat.segmented_index(
                pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT],
            ),
        ),
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0, 0],
                [pytest.FILE_DUR, pytest.FILE_DUR],
            ),
        ),
        (
            audformat.segmented_index(pytest.DB.files[:2]),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0, 0],
                [pytest.FILE_DUR, pytest.FILE_DUR],
            ),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT],
            ),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pytest.FILE_DUR],
            ),
        ),
        # provide file durations
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            {
                os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
                    pytest.FILE_DUR * 2,
            },
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.0, 0.0],
                [pytest.FILE_DUR, pytest.FILE_DUR * 2],
            ),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT],
            ),
            False,
            {
                os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
                    pytest.FILE_DUR * 2,
            },
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [pytest.FILE_DUR, pytest.FILE_DUR * 2],
            ),
        ),
        # file not found
        pytest.param(
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            None,
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # series and frame
        (
            pd.Series(
                [1, 2],
                index=audformat.filewise_index(pytest.DB.files[:2]),
            ),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        (
            pd.DataFrame(
                {'int': [1, 2], 'str': ['a', 'b']},
                index=audformat.filewise_index(pytest.DB.files[:2]),
            ),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
    ],
)
def test_to_segmented_index(obj, allow_nat, files_duration, root, expected):
    result = audformat.utils.to_segmented_index(
        obj,
        allow_nat=allow_nat,
        files_duration=files_duration,
        root=root,
    )
    if not isinstance(result, pd.Index):
        result = result.index
    pd.testing.assert_index_equal(result, expected)
    if files_duration and not allow_nat:
        # for filewise tables we expect a duration for every file
        # for segmented only where end == NaT
        files = result.get_level_values(audformat.define.IndexField.FILE)
        if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED:
            mask = result.get_level_values(
                audformat.define.IndexField.END
            ) == pd.NaT
            files = files[mask]
        for file in files:
            file = os.path.join(root, file)
            assert file in files_duration


@pytest.mark.parametrize(
    'output_folder,table_id,expected_file_names',
    [
        pytest.param(
            '.', 'segments', None,
            marks=pytest.mark.xfail(raises=ValueError)
        ),
        pytest.param(
            os.path.abspath(''), 'segments', None,
            marks=pytest.mark.xfail(raises=ValueError)
        ),
        (
            'tmp', 'files',
            [str(i).zfill(3) for i in range(1, 101)]
        )
    ]
)
def test_to_filewise(output_folder, table_id, expected_file_names):
    has_existed = os.path.exists(output_folder)

    frame = utils.to_filewise_index(
        obj=pytest.DB[table_id].get(),
        root=pytest.DB_ROOT,
        output_folder=output_folder,
        num_workers=3,
    )

    assert audformat.index_type(frame) == define.IndexType.FILEWISE
    pd.testing.assert_frame_equal(
        pytest.DB[table_id].get().reset_index(drop=True),
        frame.reset_index(drop=True),
    )

    files = frame.index.get_level_values(define.IndexField.FILE).values

    if table_id == 'segmented':  # already `framewise` frame is unprocessed
        assert os.path.isabs(output_folder) == os.path.isabs(files[0])

    if table_id == 'files':
        # files of unprocessed frame are relative to `root`
        files = [os.path.join(pytest.DB_ROOT, f) for f in files]
    assert all(os.path.exists(f) for f in files)

    file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files]
    assert file_names == expected_file_names

    # clean-up
    if not has_existed:  # output folder was created and can be removed
        if os.path.exists(output_folder):
            shutil.rmtree(output_folder)
    else:
        if table_id == 'segments':
            for f in frame.index.get_level_values(define.IndexField.FILE):
                if os.path.exists(f):
                    os.remove(f)


@pytest.mark.parametrize(
    'objs, expected',
    [
        (
            [],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(),
                audformat.filewise_index(),
            ],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index(['f1', 'f2', 'f3']),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index('f3'),
            ],
            audformat.filewise_index(['f1', 'f2', 'f3']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2']),
                audformat.segmented_index(['f3', 'f4']),
            ],
            audformat.segmented_index(['f1', 'f2', 'f3', 'f4']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
            ],
            audformat.segmented_index(
                ['f1', 'f2', 'f2', 'f3'],
                [0, 0, 1, 1],
                [1, 1, 2, 2],
            ),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
            ],
            audformat.segmented_index(
                ['f1', 'f1', 'f2', 'f2', 'f3'],
                [0, 0, 0, 0, 0],
                [pd.NaT, 1, pd.NaT, 1, 1],
            ),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
                audformat.filewise_index('f1'),
            ],
            audformat.segmented_index(
                ['f1', 'f1', 'f2', 'f3'],
                [0, 0, 0, 0],
                [pd.NaT, 1, 1, 1],
            ),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(['f1', 'f2']),
        ),
        (
            [
                audformat.filewise_index(),
                audformat.segmented_index(['f1', 'f2']),
            ],
            audformat.segmented_index(['f1', 'f2']),
        ),
    ],
)
def test_union(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.union(objs),
        expected,
    )
"names ( [ pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame( {", "pd.Series([2.], audformat.filewise_index('f1')), ), # combine values with matching dtype ( [ pd.Series( [1,", "'3s'], ), columns=['value1', 'value2'], ), ), pytest.param( StringIO('''value 0.0 1.0 2.0'''), None, marks=pytest.mark.xfail(raises=ValueError)", "( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ), (", "duration = audformat.utils.duration(obj) if pd.isnull(expected_duration): assert pd.isnull(duration) else: assert duration == expected_duration @pytest.mark.parametrize(", "0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2',", "), ( audformat.segmented_index( ['f1', 'f2'], ['1s', '3s'], ['2s', '4s'], ), '.', audformat.segmented_index( [", "), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '', None, audformat.filewise_index(['f1', 'f2']), ), ( audformat.filewise_index(['f1.ogg', 'f2.wav']), 'mp3',", "( 'tmp', 'files', [str(i).zfill(3) for i in range(1, 101)] ) ] ) def", "audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2']),", "'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.segmented_index(), ],", "], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')),", "pd.Series([2.], audformat.filewise_index('f2'), name='c2'), ], False, pd.DataFrame( { 'c1': [1., np.nan], 'c2': [np.nan, 2.],", "( (['a'], ['b', 'c']), ['a', 'b', 'c'], ), ( (['a'], ['a']), ['a'], ),", "found pytest.param( audformat.filewise_index(pytest.DB.files[:2]), False, None, None, None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # series and frame", "pd.Series( [2.], audformat.segmented_index('f1', 0, 1), name='c2', ), ], False, pd.DataFrame( { 'c1': [1.,", "( audformat.filewise_index(['f1.wav', 'f2.wav']), 'mp3', None, audformat.filewise_index(['f1.mp3', 'f2.mp3']), ), ( audformat.segmented_index(['f1.wav', 'f2.wav']), 'mp3', None,", "['f1', 'f2', 'f3'], [0, 0, 0], [1, 1, 1], ), ), ( [", "), ] ) def test_hash(obj, expected): assert utils.hash(obj) == expected assert utils.hash(obj[::-1]) ==", "audformat.filewise_index('f1')), ], True, pd.Series([2.], audformat.filewise_index('f1')), ), # combine values with matching dtype (", "'d'], audformat.filewise_index(['f1', 'f2', 'f4']), name='c2', ), pd.DataFrame( { 'c1': [np.nan, 3.], 'c2': ['b',", "files = frame.index.get_level_values(define.IndexField.FILE).values if table_id == 'segmented': # already `framewise` frame is unprocessed", "['a', np.nan, 'd'], audformat.filewise_index(['f1', 'f2', 'f4']), name='c2', ), pd.DataFrame( { 'c1': [np.nan, 3.],", "[1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), (", "audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "test_join_labels(labels, expected): assert utils.join_labels(labels) == expected def test_join_schemes(): # Empty list 
audformat.utils.join_schemes([], 'scheme_id')", "== expected @pytest.mark.parametrize('csv,result', [ ( StringIO('''file f1 f2 f3'''), pd.Index( ['f1', 'f2', 'f3'],", "), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1',", "'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), ), '-103439349488189352', ), ]", "None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index(", "None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), False, None, None, audformat.segmented_index(), ), # allow", "101)] ) ] ) def test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder) frame =", "`root` files = [os.path.join(pytest.DB_ROOT, f) for f in files] assert all(os.path.exists(f) for f", "1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0,", "), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(), ), ( [", "'b': 2, 'c': 2}, ), ( [{'a': 0}, {'a': 1}, {'a': 2}], {'a':", "expected_index', [ ( audformat.filewise_index(), 'mp3', None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(),", "overwrite, expected', [ # empty ( [], False, pd.Series([], audformat.filewise_index(), dtype='object'), ), (", "), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ), (", "[2, 2]), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), (", "'c': 2}], {'a': 0, 'b': 2, 'c': 2}, ), ( [{'a': 0}, {'a':", "[pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), False,", "pd.Series([], audformat.filewise_index(), dtype='object') ), ( [pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(), dtype='object') ),", "( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.filewise_index(), audformat.segmented_index(['f1',", "assert db1.schemes['scheme_id'] == expected assert db2.schemes['scheme_id'] == expected # Three database db3 =", "audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1., 2.],", "1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']),", "StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s',", "test_intersect(objs, expected): pd.testing.assert_index_equal( audformat.utils.intersect(objs), expected, ) @pytest.mark.parametrize( 'labels, expected', [ ( [], [],", "'mp3', None, audformat.filewise_index(['f1.mp3', 'f2.mp3']), ), ( audformat.segmented_index(['f1.wav', 'f2.wav']), 'mp3', None, audformat.segmented_index(['f1.mp3', 'f2.mp3']), ),", "'b': 1}, ), ( [{'a': 0}, {'b': 1, 'c': 2}], {'a': 0, 'b':", "['f1', 'f1', 'f2'], starts=['0s', '0s', '0s'], ends=['1s', '2s', '3s'], ), 
name='value', ), ),", "pd.isnull(expected_duration): assert pd.isnull(duration) else: assert duration == expected_duration @pytest.mark.parametrize( 'index, root, expected', [", "[pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT],", "False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0],", "( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')),", "filewise tables we expect a duration for every file # for segmented only", "'f2', 'f3']), dtype='category', ) ), # combine series with non-nullable dtype ( [", "[ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), #", "1], [2, 2]), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ),", "expected): obj = utils.concat(objs, overwrite=overwrite) if isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected) else: pd.testing.assert_frame_equal(obj, expected)", "2}], {'a': 0, 'b': 2, 'c': 2}, ), ( [{'a': 0}, {'a': 1},", "empty ( audformat.filewise_index(), True, None, None, audformat.segmented_index(), ), ( audformat.filewise_index(), False, None, None,", "[0, 0], [1, 1]), ], audformat.segmented_index( ['f1', 'f2', 'f3'], [0, 0, 0], [1,", "), pytest.param( [{'a': 0, 'b': 1}, ['c']], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [['a', 'b'],", "'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where',", "'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ],", "( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']),", "db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected', [ ('en', 'eng'), ('en', 'eng'),", "), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float32', ), pd.Series( [1.,", "mask = result.get_level_values( audformat.define.IndexField.END ) == pd.NaT files = files[mask] for file in", "audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ),", "], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [", "audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0],", "'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.],", "marks=pytest.mark.xfail(raises=ValueError), ), ] ) def test_join_labels(labels, 
expected): assert utils.join_labels(labels) == expected def test_join_schemes():", "'f4']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2',", "audformat.filewise_index(), '0', ), ( audformat.segmented_index(), '0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), (", "None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pytest.FILE_DUR], ), ), # provide file", "audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT], ), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR *", "os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ]", "'f2.wv']), 'mp3', None, audformat.filewise_index(['f1', 'f2.mp3']), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), '', None, audformat.filewise_index(['f1', 'f2']),", "pd.DataFrame([], audformat.segmented_index(), dtype='object') ), # combine series with same name ( [ pd.Series([],", "'-103439349488189352', ), ] ) def test_hash(obj, expected): assert utils.hash(obj) == expected assert utils.hash(obj[::-1])", "tables we expect a duration for every file # for segmented only where", "'where', 'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some',", "audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), ], audformat.filewise_index(['f1', 'f2']),", "files_duration, root, expected): result = audformat.utils.to_segmented_index( obj, allow_nat=allow_nat, files_duration=files_duration, root=root, ) if not", "for f in frame.index.get_level_values( define.IndexField.FILE): if os.path.exists(f): os.remove(f) @pytest.mark.parametrize( 'objs, expected', [ (", "), ( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1',", "[1], [2])), pd.Timedelta(1, unit='s'), ), # filewise index, but file is missing pytest.param(", "1}, {'b': 1, 'c': 2}], {'a': 0, 'b': 1, 'c': 2}, ), (", "{'b': 1}], {'a': 0, 'b': 1}, ), ( [{'a': 0}, {'b': 1, 'c':", "created and can be removed if os.path.exists(output_folder): shutil.rmtree(output_folder) else: if table_id == 'segments':", "[0, 0, 0], [1, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0,", "pytest.param( [['a', 'b'], ['b', 'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age': 20}},", "'f2']), ), ( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [", "[ pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series( [1,", "1]), audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(),", "[0, 0], [1, 1]), '-3831446135233514455', ), ( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])), '-8245754232361677810', ),", "], False, pd.DataFrame( { 'c': [1., 2., 3.], }, audformat.filewise_index(['f1', 'f2', 'f3']), ),", "None, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), ), # forbid nat (", ") ] ) def test_to_filewise(output_folder, table_id, 
expected_file_names): has_existed = os.path.exists(output_folder) frame = utils.to_filewise_index(", "'c': 2}], {'a': 0, 'b': 1, 'c': 2}, ), ( [{'a': 0, 'b':", "== expected @pytest.mark.parametrize( 'objs, expected', [ ( [], audformat.filewise_index(), ), ( [ audformat.filewise_index(),", "True, None, None, audformat.segmented_index(), ), ( audformat.filewise_index(), False, None, None, audformat.segmented_index(), ), (", "0], [1, 1]), ), '-103439349488189352', ), ] ) def test_hash(obj, expected): assert utils.hash(obj)", "'f2']), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0,", "audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), # combine series with different names ( [", "1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f3'], [0, 0, 0, 0], [pd.NaT,", "[0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(", "), ( audformat.segmented_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ),", "), # combine series with same name ( [ pd.Series([], audformat.filewise_index(), dtype=float), pd.Series([1.,", "audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ), ] ) def test_intersect(objs, expected): pd.testing.assert_index_equal(", "2.], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series( [1.],", "1, pd.NaT, 1, pd.NaT], ), ), ] ) def test_union(objs, expected): pd.testing.assert_index_equal( audformat.utils.union(objs),", "['a']), ['a'], ), ( [{'a': 0}], {'a': 0}, ), ( [{'a': 0}, {'b':", "audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index('f2'), ), ( [ audformat.filewise_index(['f1', 'f2']),", "# for segmented only where end == NaT files = result.get_level_values(audformat.define.IndexField.FILE) if audformat.index_type(obj)", "'0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), (", ") def test_expand_file_path(tmpdir, index, root, expected): expanded_index = audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index, expected) @pytest.mark.parametrize(", "( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), (", "[2.], audformat.segmented_index('f1', 0, 1), name='c2', ), ], False, pd.DataFrame( { 'c1': [1., np.nan],", "('english', 'eng'), ('English', 'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'xxx', None, marks=pytest.mark.xfail(raises=ValueError)", "( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')),", "2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']),", "], False, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False,", "is missing pytest.param( audformat.segmented_index(['f1'], [0]), None, 
marks=pytest.mark.xfail(raises=FileNotFoundError), ), ] ) def test_duration(obj, expected_duration):", "expected): pd.testing.assert_index_equal( audformat.utils.intersect(objs), expected, ) @pytest.mark.parametrize( 'labels, expected', [ ( [], [], ),", "assert db1.schemes['scheme_id'] == scheme1 # Two databases db2 = audformat.Database('db2') scheme2 = audformat.Scheme(labels={'b':", "audformat.segmented_index(['f1.mp3', 'f2.mp3']), ), ( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1',", "audeer import audformat from audformat import utils from audformat import define @pytest.mark.parametrize( 'objs,", "None, audformat.filewise_index(['f1', 'f2']), ), ( audformat.filewise_index(['f1.ogg', 'f2.wav']), 'mp3', '.ogg', audformat.filewise_index(['f1.mp3', 'f2.wav']), ), ]", "pd.DataFrame( {'int': [1, 2], 'str': ['a', 'b']}, index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]),", "= [os.path.join(pytest.DB_ROOT, f) for f in files] assert all(os.path.exists(f) for f in files)", "[1., 2., 3., np.nan], 'c2': ['a', 'b', 'c', 'd'] }, audformat.segmented_index(['f1', 'f2', 'f3',", "1), ), ] ) def test_intersect(objs, expected): pd.testing.assert_index_equal( audformat.utils.intersect(objs), expected, ) @pytest.mark.parametrize( 'labels,", "def test_expand_file_path(tmpdir, index, root, expected): expanded_index = audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index, expected) @pytest.mark.parametrize( 'obj,", "), # error: values do not match pytest.param( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')),", "'0s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''),", "[{'a': 0}, {'b': 1}], {'a': 0, 'b': 1}, ), ( [{'a': 0}, {'b':", "expected @pytest.mark.parametrize( 'objs, expected', [ ( [], audformat.filewise_index(), ), ( [ audformat.filewise_index(), ],", "2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']),", "pytest.param( [{'a': 0, 'b': 1}, ['c']], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [['a', 'b'], ['b',", "combine series and data frame ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c',", "missing pytest.param( audformat.filewise_index(['f1']), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # segmented index with NaT, but file", "'f2', 'f3'], [0, 0, 0], [1, 1, 1], ), ), ( [ audformat.segmented_index(['f1',", "unit='s'), ), ( pd.Series( index=audformat.segmented_index(['f1'], [1], [2]), dtype='category', ), pd.Timedelta(1, unit='s'), ), (", "'f2'], starts=['0s', '0s', '0s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end", "os.path.exists(output_folder) frame = utils.to_filewise_index( obj=pytest.DB[table_id].get(), root=pytest.DB_ROOT, output_folder=output_folder, num_workers=3, ) assert audformat.index_type(frame) == define.IndexType.FILEWISE", "[ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series( ['a', 'b', 'a'],", "['2s', '4s'], ), '.', audformat.segmented_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ], ['1s', '3s'], ['2s', '4s'],", "), True, None, None, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), ), #", 
"pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'],", "audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), (", "( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ),", "), ) ] ) def test_expand_file_path(tmpdir, index, root, expected): expanded_index = audformat.utils.expand_file_path(index, root)", "os.path.sep + audeer.safe_path('f1'), audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f2'), ] ), ), (", "audformat.filewise_index('f1'), ), ), ( [ pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f2'), name='c2'), ], False,", "( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'],", "), pd.Series( 2, audformat.filewise_index('f2'), dtype='int64', ), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']),", "], False, pd.Series( True, audformat.filewise_index(['f1', 'f2']), dtype='boolean', ), ), ( [ pd.Series( 1,", "marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series(", "0], [1, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "pd.Series([], audformat.filewise_index(), dtype='object'), ), ( [pd.Series([], audformat.filewise_index(), dtype='object')], False, pd.Series([], audformat.filewise_index(), dtype='object') ),", "[0.1, 0.5], [0.2, pd.NaT], ), True, None, None, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2,", "[ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), (", "0], [None, 1], ), ), ), # combine series and data frame (", "audformat.filewise_index(), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ],", "pd.Series([np.nan], audformat.filewise_index('f1')), ], False, pd.Series([np.nan], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')),", "[{0: {'age': 20}}, {'0': {'age': 30}}], [], marks=pytest.mark.xfail(raises=ValueError), ), ] ) def test_join_labels(labels,", "audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [", "[ # empty ( audformat.filewise_index(), True, None, None, audformat.segmented_index(), ), ( audformat.filewise_index(), False,", "import utils from audformat import define @pytest.mark.parametrize( 'objs, overwrite, expected', [ # empty", "pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame( { 'c1': [1.], 'c2':", "), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1', ), pd.Series( [2.], audformat.filewise_index('f2'),", "audformat.filewise_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c': [1., 2., 3.], }, audformat.filewise_index(['f1',", "[ ( [], audformat.filewise_index(), ), ( [ 
audformat.filewise_index(), ], audformat.filewise_index(), ), ( [", "), # error: dtypes do not match pytest.param( [ pd.Series([1], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')),", "], names=['file', 'start', 'end'], ), ), ( StringIO('''file,start,end,value f1,00:00:00,00:00:01,0.0 f1,00:00:01,00:00:02,1.0 f2,00:00:02,00:00:03,2.0'''), pd.Series( [0.0,", "['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'], ),", "0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, 1], ), ), ( [", "test_concat(objs, overwrite, expected): obj = utils.concat(objs, overwrite=overwrite) if isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected) else:", "'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), '-3831446135233514455', ), (", "2]), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [", "[ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], True, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.],", "audformat.filewise_index(['f1', 'f2']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index(['f1',", "1}, ), ( [{'a': 0}, {'b': 1, 'c': 2}], {'a': 0, 'b': 1,", "[0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(),", "databases db2 = audformat.Database('db2') scheme2 = audformat.Scheme(labels={'b': [3]}) db2.schemes['scheme_id'] = scheme2 expected =", "files of unprocessed frame are relative to `root` files = [os.path.join(pytest.DB_ROOT, f) for", "marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( os.path.abspath(''), 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), ( 'tmp', 'segments', [ str(i).zfill(3)", "( pd.DataFrame( {'a': [0, 1], 'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "), ], False, pd.DataFrame( { 'c': [1., 2., 3.], }, audformat.filewise_index(['f1', 'f2', 'f3']),", "pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')), ], True, pd.Series([2.], audformat.filewise_index('f1')), ), # combine values with", "False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']),", "audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where',", "pd.Index): result = result.index pd.testing.assert_index_equal(result, expected) if files_duration and not allow_nat: # for", "audformat.segmented_index(), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), 'mp3', None, audformat.filewise_index(['f1.mp3', 'f2.mp3']), ), ( audformat.segmented_index(['f1.wav', 'f2.wav']),", "'c': 2}, ), ( [{'a': 0}, {'a': 1}, {'a': 2}], {'a': 2}, ),", "], audformat.filewise_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), ], audformat.filewise_index(['f1', 'f2']), ),", "0, 0], [pd.NaT, 1, pd.NaT, 1, pd.NaT], ), ), ] ) def test_union(objs,", "( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a',", 
"marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age': 20}}, {'0': {'age': 30}}], [], marks=pytest.mark.xfail(raises=ValueError), ), ]", "( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')), ], True, pd.Series([2.], audformat.filewise_index('f1')), ), # combine", "pd.Timedelta(1.9, unit='s'), ), ( audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]), pd.Timedelta(3, unit='s'), ),", "'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']),", "io import StringIO import os import shutil import numpy as np import pandas", "[0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0,", "audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ], False, pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ),", "( [ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']),", "'value2': ['a', 'b', 'c'], }, index=audformat.filewise_index(['f1', 'f2', 'f3']), columns=['value1', 'value2'], ), ), (", "'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2", "audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5],", "1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index( ['f1', 'f2', 'f3'], [0,", "1], [1, 1, 2, 2], ), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(),", "with non-nullable dtype ( [ pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),", "1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0,", "= audformat.utils.to_segmented_index( obj, allow_nat=allow_nat, files_duration=files_duration, root=root, ) if not isinstance(result, pd.Index): result =", "result) else: pd.testing.assert_frame_equal(obj, result) @pytest.mark.parametrize( 'index, extension, pattern, expected_index', [ ( audformat.filewise_index(), 'mp3',", "audformat.filewise_index(), True, None, None, audformat.segmented_index(), ), ( audformat.filewise_index(), False, None, None, audformat.segmented_index(), ),", "pytest.param( [ pd.Series([1], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [", "), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "[], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{'a': 0, 'b': 1}, ['c']], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param(", "@pytest.mark.parametrize( 'labels, expected', [ ( [], [], ), ( (['a'], ['b']), ['a', 'b'],", ") files = frame.index.get_level_values(define.IndexField.FILE).values if table_id == 'segmented': # already `framewise` frame is", "0, 0, 0], [pd.NaT, 1, pd.NaT, 1, 1], ), ), ( [ audformat.segmented_index(['f1',", "[2]), pd.Timedelta(2, unit='s'), ), ( audformat.segmented_index(['f1'], [0.1], [2]), pd.Timedelta(1.9, unit='s'), ), ( audformat.segmented_index(['f1',", "), ), ( 
StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''), pd.MultiIndex.from_arrays( [ ['f1', 'f1', 'f2'], pd.to_timedelta(['0s',", "[1, 1]), '-3831446135233514455', ), ( pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])), '-8245754232361677810', ), ( pd.DataFrame(", "audformat.utils.replace_file_extension( index, extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root, expected',", "), '-103439349488189352', ), ] ) def test_hash(obj, expected): assert utils.hash(obj) == expected assert", "[ ( audformat.filewise_index(), '0', ), ( audformat.segmented_index(), '0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963',", "audformat.filewise_index(['f1', 'f2', 'f4']), name='c2', ), pd.DataFrame( { 'c1': [np.nan, 3.], 'c2': ['b', 'c'],", "'f2', 'f2', 'f3'], [0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, pd.NaT],", "2.], audformat.filewise_index(['f1', 'f2']), dtype='float32', ), pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64', ), ],", "Two databases db2 = audformat.Database('db2') scheme2 = audformat.Scheme(labels={'b': [3]}) db2.schemes['scheme_id'] = scheme2 expected", "), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a',", "audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f2')), ], False, pd.Series([1., 2.],", "] ) def test_join_labels(labels, expected): assert utils.join_labels(labels) == expected def test_join_schemes(): # Empty", "audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], False, pd.Series([np.nan], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan],", "test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder) frame = utils.to_filewise_index( obj=pytest.DB[table_id].get(), root=pytest.DB_ROOT, output_folder=output_folder, num_workers=3,", "( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index('f2'), ), ( [", "), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [", "'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "= utils.concat(objs, overwrite=overwrite) if isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected) else: pd.testing.assert_frame_equal(obj, expected) @pytest.mark.parametrize( 'obj,", "os.path.exists(output_folder): shutil.rmtree(output_folder) else: if table_id == 'segments': for f in frame.index.get_level_values( define.IndexField.FILE): if", "[{'a': 0, 'b': 1}, {'b': 2, 'c': 2}], {'a': 0, 'b': 2, 'c':", "'f2'], pd.to_timedelta(['0s', '1s', '2s']), pd.to_timedelta(['1s', '2s', '3s']), ], names=['file', 'start', 'end'], ), ),", "( 'tmp', 'segments', [ str(i).zfill(3) + f'_{j}' for i in range(1, 11) for", "['a', 'b', 'c'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( ('a', 'b', 'c'), [], marks=pytest.mark.xfail(raises=ValueError), ),", "[ ( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), ( 
audformat.filewise_index(['f1',", "audformat.filewise_index(['f1', 'f2'])), pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']),", "def test_concat(objs, overwrite, expected): obj = utils.concat(objs, overwrite=overwrite) if isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected)", "audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int': [1, 2], 'str': ['a', 'b']}, index=audformat.filewise_index(pytest.DB.files[:2]), ), True,", "], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), # combine values in same location", "[0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '0s', '0s'], ends=['1s', '2s', '3s'],", "audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT,", "pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), # combine series and overwrite values", "['f1', 'f1', 'f2'], pd.to_timedelta(['0s', '1s', '2s']), pd.to_timedelta(['1s', '2s', '3s']), ], names=['file', 'start', 'end'],", "'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') +", "['b', 'c']), ['a', 'b', 'c'], ), ( (['a'], ['a']), ['a'], ), ( [{'a':", "), ( [ audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(), audformat.filewise_index(), ], audformat.filewise_index(),", "pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'xxx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'Bad language',", "are relative to `root` files = [os.path.join(pytest.DB_ROOT, f) for f in files] assert", "), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),", "pattern, expected_index): index = audformat.utils.replace_file_extension( index, extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj,", "audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float32',", "'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f2')), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1',", "[os.path.join(pytest.DB_ROOT, f) for f in files] assert all(os.path.exists(f) for f in files) file_names", "[pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(), dtype='object') ), ( [pd.DataFrame([], audformat.segmented_index(), dtype='object')], False,", "'f3'], [0, 0, 0, 0], [pd.NaT, 1, 1, 1], ), ), ( [", "'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3', None, audformat.filewise_index(['f1', 'f2.mp3']), ),", "pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), ( [ pd.Series( [1., 2.],", "'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), # error: values do", "index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']),", "), # file not found pytest.param( 
audformat.filewise_index(pytest.DB.files[:2]), False, None, None, None, marks=pytest.mark.xfail(raises=FileNotFoundError), ),", "folder was created and can be removed if os.path.exists(output_folder): shutil.rmtree(output_folder) else: if table_id", "== pd.NaT files = files[mask] for file in files: file = os.path.join(root, file)", "'f2', 'f3'], [0, 0, 1, 1], [1, 1, 2, 2], ), ), (", "2}], {'a': 0, 'b': 1, 'c': 2}, ), ( [{'a': 0, 'b': 1},", "scheme1 = audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] = scheme1 audformat.utils.join_schemes([db1], 'scheme_id') assert db1.schemes['scheme_id'] ==", "dtype='int64', ), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ), #", "audformat.Database('db2') scheme2 = audformat.Scheme(labels={'b': [3]}) db2.schemes['scheme_id'] = scheme2 expected = audformat.Scheme(labels={'a': [1, 2],", "pd.Series([2.], audformat.segmented_index('f2')), ], False, pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')),", "pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError),", "name='value', ), ), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''), pd.MultiIndex.from_arrays( [ ['f1', 'f1', 'f2'],", "['1s', '3s'], ['2s', '4s'], ), ) ] ) def test_expand_file_path(tmpdir, index, root, expected):", "[1, 1], [2, 2]), ], audformat.segmented_index( ['f1', 'f2', 'f2', 'f3'], [0, 0, 1,", "audformat.filewise_index(['f1', 'f2']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index('f2'),", "'1s', '2s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b", "2, 'c': 2}], {'a': 0, 'b': 2, 'c': 2}, ), ( [{'a': 0},", "[ audformat.filewise_index(), audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), ],", "audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index('f2'), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ],", "( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some',", "frame ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c', ), pd.DataFrame( { 'c':", "( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1', ), pd.Series( [2.], audformat.filewise_index('f2'), name='c2',", "pd.Series([np.nan], audformat.filewise_index('f1')), ], True, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f1')),", "audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(),", "), # combine series with non-nullable dtype ( [ pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),", "), ] ) def test_duration(obj, expected_duration): duration = audformat.utils.duration(obj) if pd.isnull(expected_duration): assert pd.isnull(duration)", "{'0': {'age': 30}}], [], 
marks=pytest.mark.xfail(raises=ValueError), ), ] ) def test_join_labels(labels, expected): assert utils.join_labels(labels)", "test_join_schemes(): # Empty list audformat.utils.join_schemes([], 'scheme_id') # One database db1 = audformat.Database('db1') scheme1", "ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]), ), name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series(", "audformat.filewise_index(['f1', 'f2']), ), ( audformat.filewise_index(['f1.ogg', 'f2.wav']), 'mp3', '.ogg', audformat.filewise_index(['f1.mp3', 'f2.wav']), ), ] )", "pd.Series([2.], audformat.filewise_index('f1')), ], True, pd.Series([2.], audformat.filewise_index('f1')), ), # combine values with matching dtype", "[1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ), ] )", "], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(), ), ( [", "@pytest.mark.parametrize( 'obj, expected_duration', [ ( audformat.segmented_index(), pd.Timedelta(0, unit='s'), ), ( audformat.segmented_index(['f1'], [0], [2]),", "assert file_names == expected_file_names # clean-up if not has_existed: # output folder was", "( audformat.segmented_index( ['f1', 'f2'], ['1s', '3s'], ['2s', '4s'], ), '.', audformat.segmented_index( [ audeer.safe_path('f1'),", "'b', 'c'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( ('a', 'b', 'c'), [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param(", "'f3']), ), ], False, pd.DataFrame( { 'c': [1., 2., 3.], }, audformat.filewise_index(['f1', 'f2',", "audformat.Scheme(labels={'b': [3]}) db2.schemes['scheme_id'] = scheme2 expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]}) audformat.utils.join_schemes([db1,", "import StringIO import os import shutil import numpy as np import pandas as", "False, pd.DataFrame( { 'c1': [1., 2.], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ),", "[ audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']), ],", "audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "('English', 'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'xxx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param(", "pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root, expected', [ # empty ( audformat.filewise_index(),", "if not has_existed: # output folder was created and can be removed if", "range(1, 101)] ) ] ) def test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder) frame", "), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), ], audformat.filewise_index(['f1', 'f2']), ), ( [", "pd.DataFrame( { 'c1': [1., 2., 3., np.nan], 'c2': ['a', 'b', 'c', 'd'] },", "{ 'c1': [np.nan, 3.], 'c2': ['b', 'c'], }, audformat.segmented_index(['f2', 'f3']), ), ], False,", "[str(i).zfill(3) for i in range(1, 101)] ) ] ) def test_to_filewise(output_folder, table_id, expected_file_names):", "f in frame.index.get_level_values( define.IndexField.FILE): if os.path.exists(f): os.remove(f) @pytest.mark.parametrize( 'objs, expected', [ ( [],", "), ( [ pd.Series([1., 2.], 
audformat.filewise_index(['f1', 'f2'])), pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ], False,", "[ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'), dtype='bool', ), ], False,", "], False, pd.DataFrame( { 'c1': [1., 2., 3., np.nan], 'c2': ['a', 'b', 'c',", "'c': [2., 3.] }, audformat.filewise_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c': [1.,", "[ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), (", "'scheme_id') assert db1.schemes['scheme_id'] == scheme1 # Two databases db2 = audformat.Database('db2') scheme2 =", "'segmented': # already `framewise` frame is unprocessed assert os.path.isabs(output_folder) == os.path.isabs(files[0]) if table_id", "[0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ),", "dtype='Int64', ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float32', ), pd.Series(", "], False, None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( [1, 2, 3], index=audformat.filewise_index(['f1', 'f2',", "( audformat.segmented_index(), pd.Timedelta(0, unit='s'), ), ( audformat.segmented_index(['f1'], [0], [2]), pd.Timedelta(2, unit='s'), ), (", "expected, ) @pytest.mark.parametrize( 'labels, expected', [ ( [], [], ), ( (['a'], ['b']),", "), pd.DataFrame( { 'c1': [np.nan, 3.], 'c2': ['b', 'c'], }, audformat.segmented_index(['f2', 'f3']), ),", "table_id == 'segmented': # already `framewise` frame is unprocessed assert os.path.isabs(output_folder) == os.path.isabs(files[0])", "'-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), (", "'b': 1}, {'b': 1, 'c': 2}], {'a': 0, 'b': 1, 'c': 2}, ),", "{ os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0], [pytest.FILE_DUR,", "dtype='category', ), pd.Series( ['a', 'b', 'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False,", "import audformat from audformat import utils from audformat import define @pytest.mark.parametrize( 'objs, overwrite,", "index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), name='value', ),", "), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series(", "name='c', ), pd.DataFrame( { 'c': [2., 3.] 
from io import StringIO
import os
import shutil

import numpy as np
import pandas as pd
import pytest

import audeer

import audformat
from audformat import utils
from audformat import define


@pytest.mark.parametrize(
    'objs, overwrite, expected',
    [
        (
            [],
            False,
            pd.Series([], audformat.filewise_index(), dtype='object'),
        ),
        # combine series with same name
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f2')),
            ],
            False,
            pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
        ),
        (
            [
                pd.Series([1.], audformat.segmented_index('f1')),
                pd.Series([2.], audformat.segmented_index('f2')),
            ],
            False,
            pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),
        ),
        # combine values in same location
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([np.nan], audformat.filewise_index('f1')),
            ],
            False,
            pd.Series([1.], audformat.filewise_index('f1')),
        ),
        # combine series and overwrite values
        (
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f1')),
            ],
            True,
            pd.Series([2.], audformat.filewise_index('f1')),
        ),
        # combine values with matching dtype
        (
            [
                pd.Series(
                    [1, 2],
                    audformat.filewise_index(['f1', 'f2']),
                    dtype='int64',
                ),
                pd.Series(
                    [1, 2],
                    audformat.filewise_index(['f1', 'f2']),
                    dtype='Int64',
                ),
            ],
            False,
            pd.Series(
                [1, 2],
                audformat.filewise_index(['f1', 'f2']),
                dtype='Int64',
            ),
        ),
        # combine series with different names
        (
            [
                pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
                pd.Series([2.], audformat.filewise_index('f1'), name='c2'),
            ],
            False,
            pd.DataFrame(
                {'c1': [1.], 'c2': [2.]},
                audformat.filewise_index('f1'),
            ),
        ),
        # combine series and data frame
        (
            [
                pd.Series(
                    [1., 2.],
                    audformat.filewise_index(['f1', 'f2']),
                    name='c',
                ),
                pd.DataFrame(
                    {'c': [2., 3.]},
                    audformat.filewise_index(['f2', 'f3']),
                ),
            ],
            False,
            pd.DataFrame(
                {'c': [1., 2., 3.]},
                audformat.filewise_index(['f1', 'f2', 'f3']),
            ),
        ),
        # error: values do not match
        pytest.param(
            [
                pd.Series([1.], audformat.filewise_index('f1')),
                pd.Series([2.], audformat.filewise_index('f1')),
            ],
            False,
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_concat(objs, overwrite, expected):
    obj = utils.concat(objs, overwrite=overwrite)
    if isinstance(obj, pd.Series):
        pd.testing.assert_series_equal(obj, expected)
    else:
        pd.testing.assert_frame_equal(obj, expected)


@pytest.mark.parametrize(
    'obj, expected_duration',
    [
        (audformat.segmented_index(), pd.Timedelta(0, unit='s')),
        (
            audformat.segmented_index(['f1'], [0], [2]),
            pd.Timedelta(2, unit='s'),
        ),
        (
            audformat.segmented_index(['f1'], [0.1], [2]),
            pd.Timedelta(1.9, unit='s'),
        ),
        (
            audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]),
            pd.Timedelta(3, unit='s'),
        ),
        (
            pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])),
            pd.Timedelta(1, unit='s'),
        ),
        # filewise index, but file is missing
        pytest.param(
            audformat.filewise_index(['f1']),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # segmented index with NaT, but file is missing
        pytest.param(
            audformat.segmented_index(['f1'], [0]),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
    ],
)
def test_duration(obj, expected_duration):
    duration = audformat.utils.duration(obj)
    if pd.isnull(expected_duration):
        assert pd.isnull(duration)
    else:
        assert duration == expected_duration


@pytest.mark.parametrize(
    'index, root, expected',
    [
        (audformat.filewise_index(), None, audformat.filewise_index()),
        (audformat.segmented_index(), None, audformat.segmented_index()),
        (
            audformat.filewise_index(['f1', 'f2']),
            os.path.join('some', 'where'),
            audformat.filewise_index(
                [
                    audeer.safe_path(os.path.join('some', 'where', 'f1')),
                    audeer.safe_path(os.path.join('some', 'where', 'f2')),
                ]
            ),
        ),
        (
            audformat.segmented_index(
                ['f1', 'f2'],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
            '.',
            audformat.segmented_index(
                [audeer.safe_path('f1'), audeer.safe_path('f2')],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
        ),
    ],
)
def test_expand_file_path(tmpdir, index, root, expected):
    expanded_index = audformat.utils.expand_file_path(index, root)
    pd.testing.assert_index_equal(expanded_index, expected)


@pytest.mark.parametrize(
    'obj, expected',
    [
        (audformat.filewise_index(), '0'),
        (audformat.segmented_index(), '0'),
        (audformat.filewise_index(['f1', 'f2']), '-4231615416436839963'),
        (audformat.segmented_index(['f1', 'f2']), '-2363261461673824215'),
        (
            audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
            '-3831446135233514455',
        ),
        (
            pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])),
            '-8245754232361677810',
        ),
        (
            pd.DataFrame(
                {'a': [0, 1], 'b': [2, 3]},
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
            ),
            '-103439349488189352',
        ),
    ],
)
def test_hash(obj, expected):
    assert utils.hash(obj) == expected
    assert utils.hash(obj[::-1]) == expected


@pytest.mark.parametrize(
    'objs, expected',
    [
        ([], audformat.filewise_index()),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index('f2'),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
            ],
            audformat.segmented_index('f2', 0, 1),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.segmented_index('f2', 0, 1),
        ),
    ],
)
def test_intersect(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.intersect(objs),
        expected,
    )


@pytest.mark.parametrize(
    'labels, expected',
    [
        ([], []),
        ((['a'], ['b']), ['a', 'b']),
        ((['a'], ['b', 'c']), ['a', 'b', 'c']),
        ((['a'], ['a']), ['a']),
        ([{'a': 0}], {'a': 0}),
        ([{'a': 0}, {'b': 1}], {'a': 0, 'b': 1}),
        ([{'a': 0, 'b': 1}, {'b': 2, 'c': 2}], {'a': 0, 'b': 2, 'c': 2}),
        ([{'a': 0}, {'a': 1}, {'a': 2}], {'a': 2}),
        pytest.param(
            ['a', 'b', 'c'],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{'a': 0, 'b': 1}, ['c']],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{0: {'age': 20}}, {'0': {'age': 30}}],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_join_labels(labels, expected):
    assert utils.join_labels(labels) == expected


def test_join_schemes():
    # Empty list
    audformat.utils.join_schemes([], 'scheme_id')
    # One database
    db1 = audformat.Database('db1')
    scheme1 = audformat.Scheme(labels={'a': [1, 2]})
    db1.schemes['scheme_id'] = scheme1
    audformat.utils.join_schemes([db1], 'scheme_id')
    assert db1.schemes['scheme_id'] == scheme1
    # Two databases
    db2 = audformat.Database('db2')
    scheme2 = audformat.Scheme(labels={'b': [3]})
    db2.schemes['scheme_id'] = scheme2
    expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]})
    audformat.utils.join_schemes([db1, db2], 'scheme_id')
    assert db1.schemes['scheme_id'] == expected
    assert db2.schemes['scheme_id'] == expected
    # Three databases
    db3 = audformat.Database('db3')
    scheme3 = audformat.Scheme(labels={'a': [4]})
    db3.schemes['scheme_id'] = scheme3
    expected = audformat.Scheme(labels={'a': [4], 'b': [3]})
    audformat.utils.join_schemes([db1, db2, db3], 'scheme_id')
    assert db3.schemes['scheme_id'] == expected
    # Fail for schemes without labels
    with pytest.raises(ValueError):
        db = audformat.Database('db')
        db.schemes['scheme_id'] = audformat.Scheme()
        audformat.utils.join_schemes([db], 'scheme_id')


@pytest.mark.parametrize(
    'language, expected',
    [
        ('en', 'eng'),
        ('english', 'eng'),
        ('English', 'eng'),
        pytest.param('xx', None, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param('xxx', None, marks=pytest.mark.xfail(raises=ValueError)),
        pytest.param(
            'Bad language',
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_map_language(language, expected):
    assert utils.map_language(language) == expected


@pytest.mark.parametrize('csv,result', [
    (
        StringIO('''file,value
f1,0.0
f2,1.0
f3,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.filewise_index(['f1', 'f2', 'f3']),
            name='value',
        ),
    ),
    (
        StringIO('''file,start,end
f1,00:00:00,00:00:01
f1,00:00:01,00:00:02
f2,00:00:02,00:00:03'''),
        pd.MultiIndex.from_arrays(
            [
                ['f1', 'f1', 'f2'],
                pd.to_timedelta(['0s', '1s', '2s']),
                pd.to_timedelta(['1s', '2s', '3s']),
            ],
            names=['file', 'start', 'end'],
        ),
    ),
    (
        StringIO('''file,start,end,value1,value2
f1,00:00:00,00:00:01,0.0,a
f1,00:00:01,00:00:02,1.0,b
f2,00:00:02,00:00:03,2.0,c'''),
        pd.DataFrame(
            {
                'value1': [0.0, 1.0, 2.0],
                'value2': ['a', 'b', 'c'],
            },
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=['1s', '2s', '3s'],
            ),
            columns=['value1', 'value2'],
        ),
    ),
    pytest.param(
        StringIO('''value
0.0
1.0
2.0'''),
        None,
        marks=pytest.mark.xfail(raises=ValueError),
    ),
])
def test_read_csv(csv, result):
    obj = audformat.utils.read_csv(csv)
    if isinstance(result, pd.Index):
        pd.testing.assert_index_equal(obj, result)
    elif isinstance(result, pd.Series):
        pd.testing.assert_series_equal(obj, result)
    else:
        pd.testing.assert_frame_equal(obj, result)
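

# Added illustrative sketch (not part of the original test suite): shows how
# audformat.utils.read_csv() turns a CSV with file, start, end, and value
# columns into a pandas series with a segmented index, mirroring the
# parametrized cases of test_read_csv() above.
def test_read_csv_segmented_sketch():
    csv = StringIO('''file,start,end,value
f1,00:00:00,00:00:01,0.0
f2,00:00:01,00:00:02,1.0''')
    obj = audformat.utils.read_csv(csv)
    expected = pd.Series(
        [0.0, 1.0],
        index=audformat.segmented_index(
            ['f1', 'f2'],
            starts=['0s', '1s'],
            ends=['1s', '2s'],
        ),
        name='value',
    )
    pd.testing.assert_series_equal(obj, expected)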


@pytest.mark.parametrize(
    'index, extension, pattern, expected_index',
    [
        (audformat.filewise_index(), 'mp3', None, audformat.filewise_index()),
        (
            audformat.segmented_index(),
            'mp3',
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.filewise_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.segmented_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.segmented_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            '',
            None,
            audformat.filewise_index(['f1', 'f2']),
        ),
        (
            audformat.filewise_index(['f1.ogg', 'f2.wav']),
            'mp3',
            '.ogg',
            audformat.filewise_index(['f1.mp3', 'f2.wav']),
        ),
    ],
)
def test_replace_file_extension(index, extension, pattern, expected_index):
    index = audformat.utils.replace_file_extension(
        index,
        extension,
        pattern=pattern,
    )
    pd.testing.assert_index_equal(index, expected_index)


@pytest.mark.parametrize(
    'obj, allow_nat, files_duration, root, expected',
    [
        # empty
        (
            audformat.filewise_index(),
            True,
            None,
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.segmented_index(),
            False,
            None,
            None,
            audformat.segmented_index(),
        ),
        # allow nat
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        # forbid nat
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0, 0],
                [pytest.FILE_DUR, pytest.FILE_DUR],
            ),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pd.NaT],
            ),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pytest.FILE_DUR],
            ),
        ),
        # provide file durations
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            {
                os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
                    pytest.FILE_DUR * 2,
            },
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.0, 0.0],
                [pytest.FILE_DUR, pytest.FILE_DUR * 2],
            ),
        ),
        # file not found
        pytest.param(
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            None,
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # series and frame
        (
            pd.Series(
                [1, 2],
                index=audformat.filewise_index(pytest.DB.files[:2]),
            ),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        (
            pd.DataFrame(
                {'int': [1, 2], 'str': ['a', 'b']},
                index=audformat.filewise_index(pytest.DB.files[:2]),
            ),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
    ],
)
def test_to_segmented_index(obj, allow_nat, files_duration, root, expected):
    result = audformat.utils.to_segmented_index(
        obj,
        allow_nat=allow_nat,
        files_duration=files_duration,
        root=root,
    )
    if not isinstance(result, pd.Index):
        result = result.index
    pd.testing.assert_index_equal(result, expected)
    if files_duration and not allow_nat:
        # for filewise tables we expect a duration for every file,
        # for segmented only where end == NaT
        files = result.get_level_values(audformat.define.IndexField.FILE)
        if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED:
            mask = result.get_level_values(
                audformat.define.IndexField.END
            ) == pd.NaT
            files = files[mask]
        for file in files:
            file = os.path.join(root, file)
            assert file in files_duration


@pytest.mark.parametrize(
    'output_folder,table_id,expected_file_names',
    [
        pytest.param(
            '.', 'segments', None,
            marks=pytest.mark.xfail(raises=ValueError)
        ),
        pytest.param(
            os.path.abspath(''), 'segments', None,
            marks=pytest.mark.xfail(raises=ValueError)
        ),
        (
            'tmp',
            'segments',
            [
                str(i).zfill(3) + f'_{j}'
                for i in range(1, 11)
                for j in range(10)
            ]
        ),
        (
            'tmp',
            'files',
            [str(i).zfill(3) for i in range(1, 101)]
        ),
    ]
)
def test_to_filewise_index(output_folder, table_id, expected_file_names):
    has_existed = os.path.exists(output_folder)
    frame = utils.to_filewise_index(
        obj=pytest.DB[table_id].get(),
        root=pytest.DB_ROOT,
        output_folder=output_folder,
        num_workers=3,
    )
    assert audformat.index_type(frame) == define.IndexType.FILEWISE
    pd.testing.assert_frame_equal(
        pytest.DB[table_id].get().reset_index(drop=True),
        frame.reset_index(drop=True),
    )
    files = frame.index.get_level_values(define.IndexField.FILE).values
    if table_id == 'segments':
        # a `framewise` frame is unprocessed
        assert os.path.isabs(output_folder) == os.path.isabs(files[0])
    if table_id == 'files':
        # files of unprocessed frame are relative to `root`
        files = [os.path.join(pytest.DB_ROOT, f) for f in files]
    assert all(os.path.exists(f) for f in files)
    file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files]
    assert file_names == expected_file_names
    # clean-up
    if not has_existed:
        # output folder was created and can be removed
        if os.path.exists(output_folder):
            shutil.rmtree(output_folder)
    else:
        if table_id == 'segments':
            for f in frame.index.get_level_values(define.IndexField.FILE):
                if os.path.exists(f):
                    os.remove(f)


@pytest.mark.parametrize(
    'objs, expected',
    [
        ([], audformat.filewise_index()),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index(['f1', 'f2', 'f3']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
            ],
            audformat.segmented_index(
                ['f1', 'f2', 'f2', 'f3'],
                [0, 0, 1, 1],
                [1, 1, 2, 2],
            ),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(['f1', 'f2']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.segmented_index(
                ['f1', 'f1', 'f2', 'f2', 'f3'],
                [0, 0, 0, 0, 0],
                [pd.NaT, 1, pd.NaT, 1, pd.NaT],
            ),
        ),
    ],
)
def test_union(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.union(objs),
        expected,
    )
audformat.filewise_index(['f1',", "[2, 2]), pd.Timedelta(3, unit='s'), ), ( pd.Series( index=audformat.segmented_index(['f1'], [1], [2]), dtype='category', ), pd.Timedelta(1,", "audformat.utils.read_csv(csv) if isinstance(result, pd.Index): pd.testing.assert_index_equal(obj, result) elif isinstance(result, pd.Series): pd.testing.assert_series_equal(obj, result) else: pd.testing.assert_frame_equal(obj,", "audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), 'mp3', None,", "expected assert utils.hash(obj[::-1]) == expected @pytest.mark.parametrize( 'objs, expected', [ ( [], audformat.filewise_index(), ),", "index=audformat.filewise_index(['f1', 'f2', 'f3']), ) ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2',", "( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT, pd.NaT], ), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR", "( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'],", "None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ),", "'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'], ), ), pytest.param(", "index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ] ) def test_to_segmented_index(obj, allow_nat, files_duration,", "audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f1'], [0,", "1, 1], [1, 1, 2, 2], ), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ],", "2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ), pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ],", "data frame ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c', ), pd.DataFrame( {", "audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(),", "audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1', 'f2']), '.',", "audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f1'), audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f2'), ]", "'1s', '2s'], ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]), ), name='value', ), ), ( StringIO('''file,end,value f1,00:00:01,0.0 f1,00:00:02,1.0", "), # provide file durations ( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR *", "import pytest import audeer import audformat from audformat import utils from audformat import", "dtype='object')], False, pd.Series([], audformat.filewise_index(), dtype='object') ), ( [pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(),", "[ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), # combine series", "'c2': [np.nan, 2.], }, audformat.segmented_index( ['f1', 'f1'], [0, 0], [None, 
1], ), ),", "] ) def test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder) frame = utils.to_filewise_index( obj=pytest.DB[table_id].get(),", "), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), #", "audformat.filewise_index('f2'), name='c2'), ], False, pd.DataFrame( { 'c1': [1., np.nan], 'c2': [np.nan, 2.], },", "'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1',", "0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ),", "), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'), dtype='bool',", "dtype='object')], False, pd.DataFrame([], audformat.segmented_index(), dtype='object') ), # combine series with same name (", "pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'), dtype='bool', ), ], False, pd.Series(", "( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), '-3831446135233514455',", "'mp3', None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']),", "audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2',", "'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), # error:", "scheme3 expected = audformat.Scheme(labels={'a': [4], 'b': [3]}) audformat.utils.join_schemes([db1, db2, db3], 'scheme_id') # Fail", "index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category',", "True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'), dtype='bool', ), ], False, pd.Series( True,", "{ 'c1': [1., 2., 3., np.nan], 'c2': ['a', 'b', 'c', 'd'] }, audformat.segmented_index(['f1',", "pd.Series( index=audformat.segmented_index(['f1'], [1], [2]), dtype='category', ), pd.Timedelta(1, unit='s'), ), ( pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])),", "{'a': 1}, {'a': 2}], {'a': 2}, ), pytest.param( ['a', 'b', 'c'], [], marks=pytest.mark.xfail(raises=ValueError),", "else: if table_id == 'segments': for f in frame.index.get_level_values( define.IndexField.FILE): if os.path.exists(f): os.remove(f)", "0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0],", "= audformat.Database('db3') scheme3 = audformat.Scheme(labels={'a': [4]}) db3.schemes['scheme_id'] = scheme3 expected = audformat.Scheme(labels={'a': [4],", "[ ( audformat.filewise_index(), 'mp3', None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ),", "marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'Bad language', None, marks=pytest.mark.xfail(raises=ValueError) ) ] ) def test_map_language(language, expected):", "audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), 
audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [", "in files) file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files] assert file_names ==", "[['a', 'b'], ['b', 'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age': 20}}, {'0':", "['a', 'b'], ), ( (['a'], ['b', 'c']), ['a', 'b', 'c'], ), ( (['a'],", "expected): assert utils.map_language(language) == expected @pytest.mark.parametrize('csv,result', [ ( StringIO('''file f1 f2 f3'''), pd.Index(", "( [pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(), dtype='object') ), ( [pd.DataFrame([], audformat.segmented_index(), dtype='object')],", "file is missing pytest.param( audformat.segmented_index(['f1'], [0]), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), ] ) def test_duration(obj,", "2, audformat.filewise_index('f2'), dtype='int64', ), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ),", "and overwrite values ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], True, pd.Series([1.], audformat.filewise_index('f1')),", "None, marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [ pd.Series( [1, 2, 3], index=audformat.filewise_index(['f1', 'f2', 'f3']), ),", "), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), False, None, pytest.DB_ROOT,", "], audformat.filewise_index(['f1', 'f2']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ],", "audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some',", "'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')),", "from audformat import utils from audformat import define @pytest.mark.parametrize( 'objs, overwrite, expected', [", "), ( audformat.segmented_index(['f1'], [0], [2]), pd.Timedelta(2, unit='s'), ), ( audformat.segmented_index(['f1'], [0.1], [2]), pd.Timedelta(1.9,", "( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0,", "root, expected', [ # empty ( audformat.filewise_index(), True, None, None, audformat.segmented_index(), ), (", "1.0, 2.0], 'value2': ['a', 'b', 'c'], }, index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s',", "pytest.param( '.', 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( os.path.abspath(''), 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), (", "[0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, 1], ), ), (", "None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), True,", "audformat.segmented_index(), dtype='object')], False, pd.DataFrame([], audformat.segmented_index(), dtype='object') ), # combine series with same name", "), pytest.param( [['a', 'b'], ['b', 'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age':", "], False, pd.Series([1.], audformat.filewise_index('f1')), ), # combine series and overwrite values ( [", "1]), ], audformat.segmented_index( ['f1', 'f2', 'f3'], [0, 
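

# Illustrative sketch (hypothetical helper, not collected by pytest and not
# part of the original parametrization above): it spells out the behaviour
# ``test_concat`` checks for two non-overlapping filewise series, namely that
# ``audformat.utils.concat`` aligns both series on one filewise index.
# The file names 'f1' and 'f2' are placeholders.
def _sketch_concat():
    y1 = pd.Series([1.], audformat.filewise_index('f1'))
    y2 = pd.Series([2.], audformat.filewise_index('f2'))
    y = utils.concat([y1, y2], overwrite=False)
    pd.testing.assert_series_equal(
        y,
        pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
    )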


@pytest.mark.parametrize(
    'obj, expected_duration',
    [
        (
            audformat.segmented_index(),
            pd.Timedelta(0, unit='s'),
        ),
        (
            audformat.segmented_index(['f1'], [0], [2]),
            pd.Timedelta(2, unit='s'),
        ),
        (
            audformat.segmented_index(['f1'], [0.1], [2]),
            pd.Timedelta(1.9, unit='s'),
        ),
        (
            audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]),
            pd.Timedelta(3, unit='s'),
        ),
        (
            pd.Series(
                index=audformat.segmented_index(['f1'], [1], [2]),
                dtype='category',
            ),
            pd.Timedelta(1, unit='s'),
        ),
        (
            pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])),
            pd.Timedelta(1, unit='s'),
        ),
        # filewise index, but file is missing
        pytest.param(
            audformat.filewise_index(['f1']),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # segmented index with NaT, but file is missing
        pytest.param(
            audformat.segmented_index(['f1'], [0]),
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
    ],
)
def test_duration(obj, expected_duration):
    duration = audformat.utils.duration(obj)
    if pd.isnull(expected_duration):
        assert pd.isnull(duration)
    else:
        assert duration == expected_duration


@pytest.mark.parametrize(
    'index, root, expected',
    [
        (
            audformat.filewise_index(),
            None,
            audformat.filewise_index(),
        ),
        (
            audformat.segmented_index(),
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            '.',
            audformat.filewise_index(
                [
                    audeer.safe_path('f1'),
                    audeer.safe_path('f2'),
                ]
            ),
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            os.path.join('some', 'where'),
            audformat.filewise_index(
                [
                    audeer.safe_path(os.path.join('some', 'where', 'f1')),
                    audeer.safe_path(os.path.join('some', 'where', 'f2')),
                ]
            ),
        ),
        (
            audformat.segmented_index(
                ['f1', 'f2'],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
            '.',
            audformat.segmented_index(
                [
                    audeer.safe_path('f1'),
                    audeer.safe_path('f2'),
                ],
                ['1s', '3s'],
                ['2s', '4s'],
            ),
        ),
    ],
)
def test_expand_file_path(tmpdir, index, root, expected):
    expanded_index = audformat.utils.expand_file_path(index, root)
    pd.testing.assert_index_equal(expanded_index, expected)


@pytest.mark.parametrize(
    'obj, expected',
    [
        (
            audformat.filewise_index(),
            '0',
        ),
        (
            audformat.segmented_index(),
            '0',
        ),
        (
            audformat.filewise_index(['f1', 'f2']),
            '-2363261461673824215',
        ),
        (
            audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
            '-3831446135233514455',
        ),
        (
            pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])),
            '-8245754232361677810',
        ),
        (
            pd.DataFrame(
                {'a': [0, 1], 'b': [2, 3]},
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
            ),
            '-103439349488189352',
        ),
    ],
)
def test_hash(obj, expected):
    assert utils.hash(obj) == expected
    assert utils.hash(obj[::-1]) == expected


@pytest.mark.parametrize(
    'objs, expected',
    [
        (
            [],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index('f2'),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index('f3'),
            ],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
            ],
            audformat.segmented_index('f2', 0, 1),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.segmented_index('f2', 0, 1),
        ),
    ],
)
def test_intersect(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.intersect(objs),
        expected,
    )


@pytest.mark.parametrize(
    'labels, expected',
    [
        (
            [],
            [],
        ),
        (
            (['a'], ['b']),
            ['a', 'b'],
        ),
        (
            (['a'], ['b', 'c']),
            ['a', 'b', 'c'],
        ),
        (
            (['a'], ['a']),
            ['a'],
        ),
        (
            [{'a': 0}, {'b': 1}],
            {'a': 0, 'b': 1},
        ),
        (
            [{'a': 0, 'b': 1}, {'b': 2, 'c': 2}],
            {'a': 0, 'b': 2, 'c': 2},
        ),
        (
            [{'a': 0}, {'a': 1}, {'a': 2}],
            {'a': 2},
        ),
        pytest.param(
            ['a', 'b', 'c'],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{'a': 0, 'b': 1}, ['c']],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            [{0: {'age': 20}}, {'0': {'age': 30}}],
            [],
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_join_labels(labels, expected):
    assert audformat.utils.join_labels(labels) == expected


def test_join_schemes():
    # Empty list
    audformat.utils.join_schemes([], 'scheme_id')
    # One database
    db1 = audformat.Database('db1')
    scheme1 = audformat.Scheme(labels={'a': [1, 2]})
    db1.schemes['scheme_id'] = scheme1
    audformat.utils.join_schemes([db1], 'scheme_id')
    assert db1.schemes['scheme_id'] == scheme1
    # Two databases
    db2 = audformat.Database('db2')
    scheme2 = audformat.Scheme(labels={'b': [3]})
    db2.schemes['scheme_id'] = scheme2
    expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]})
    audformat.utils.join_schemes([db1, db2], 'scheme_id')
    assert db1.schemes['scheme_id'] == expected
    assert db2.schemes['scheme_id'] == expected
    # Three databases
    db3 = audformat.Database('db3')
    scheme3 = audformat.Scheme(labels={'a': [4]})
    db3.schemes['scheme_id'] = scheme3
    expected = audformat.Scheme(labels={'a': [4], 'b': [3]})
    audformat.utils.join_schemes([db1, db2, db3], 'scheme_id')
    # Fail for schemes without labels
    with pytest.raises(ValueError):
        db = audformat.Database('db')
        db.schemes['scheme_id'] = audformat.Scheme('str')
        audformat.utils.join_schemes([db], 'scheme_id')
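

# Illustrative sketch (hypothetical helper, not collected by pytest): the
# mixed filewise/segmented case from ``test_intersect`` above, written out
# step by step. Intersecting a segmented index with two filewise indices
# keeps only the segments whose file is present in every index.
def _sketch_intersect():
    objs = [
        audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
        audformat.filewise_index(['f1', 'f2']),
        audformat.filewise_index(['f2', 'f3']),
    ]
    pd.testing.assert_index_equal(
        audformat.utils.intersect(objs),
        audformat.segmented_index('f2', 0, 1),
    )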


@pytest.mark.parametrize(
    'language, expected',
    [
        ('en', 'eng'),
        ('english', 'eng'),
        ('English', 'eng'),
        pytest.param(
            'xx', None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            'xxx', None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            'Bad language', None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_map_language(language, expected):
    assert utils.map_language(language) == expected


@pytest.mark.parametrize('csv,result', [
    (
        StringIO('''file
f1
f2
f3'''),
        pd.Index(
            ['f1', 'f2', 'f3'],
            name='file',
        ),
    ),
    (
        StringIO('''file,value
f1,0.0
f2,1.0
f3,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.filewise_index(['f1', 'f2', 'f3']),
            name='value',
        ),
    ),
    (
        StringIO('''file,value1,value2
f1,0.0,a
f2,1.0,b
f3,2.0,c'''),
        pd.DataFrame(
            {
                'value1': [0.0, 1.0, 2.0],
                'value2': ['a', 'b', 'c'],
            },
            index=audformat.filewise_index(['f1', 'f2', 'f3']),
            columns=['value1', 'value2'],
        ),
    ),
    (
        StringIO('''file,start,end
f1,00:00:00,00:00:01
f1,00:00:01,00:00:02
f2,00:00:02,00:00:03'''),
        pd.MultiIndex.from_arrays(
            [
                ['f1', 'f1', 'f2'],
                pd.to_timedelta(['0s', '1s', '2s']),
                pd.to_timedelta(['1s', '2s', '3s']),
            ],
            names=['file', 'start', 'end'],
        ),
    ),
    (
        StringIO('''file,start,end,value
f1,00:00:00,00:00:01,0.0
f1,00:00:01,00:00:02,1.0
f2,00:00:02,00:00:03,2.0'''),
        pd.Series(
            [0.0, 1.0, 2.0],
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=['1s', '2s', '3s'],
            ),
            name='value',
        ),
    ),
    (
        StringIO('''file,start,end,value1,value2
f1,00:00:00,00:00:01,0.0,a
f1,00:00:01,00:00:02,1.0,b
f2,00:00:02,00:00:03,2.0,c'''),
        pd.DataFrame(
            {
                'value1': [0.0, 1.0, 2.0],
                'value2': ['a', 'b', 'c'],
            },
            index=audformat.segmented_index(
                ['f1', 'f1', 'f2'],
                starts=['0s', '1s', '2s'],
                ends=['1s', '2s', '3s'],
            ),
            columns=['value1', 'value2'],
        ),
    ),
])
def test_read_csv(csv, result):
    obj = audformat.utils.read_csv(csv)
    if isinstance(result, pd.Index):
        pd.testing.assert_index_equal(obj, result)
    elif isinstance(result, pd.Series):
        pd.testing.assert_series_equal(obj, result)
    else:
        pd.testing.assert_frame_equal(obj, result)


@pytest.mark.parametrize(
    'index, extension, pattern, expected_index',
    [
        (
            audformat.filewise_index(),
            'mp3',
            None,
            audformat.filewise_index(),
        ),
        (
            audformat.segmented_index(),
            'mp3',
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.filewise_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.filewise_index(['f1.wav', 'f2.wav']),
            '',
            None,
            audformat.filewise_index(['f1', 'f2']),
        ),
        (
            audformat.segmented_index(['f1.wav', 'f2.wav']),
            'mp3',
            None,
            audformat.segmented_index(['f1.mp3', 'f2.mp3']),
        ),
        (
            audformat.filewise_index(['f1.WAV', 'f2.WAV']),
            'MP3',
            None,
            audformat.filewise_index(['f1.MP3', 'f2.MP3']),
        ),
    ],
)
def test_replace_file_extension(index, extension, pattern, expected_index):
    index = audformat.utils.replace_file_extension(
        index,
        extension,
        pattern=pattern,
    )
    pd.testing.assert_index_equal(index, expected_index)


@pytest.mark.parametrize(
    'obj, allow_nat, files_duration, root, expected',
    [
        # empty
        (
            audformat.filewise_index(),
            True,
            None,
            None,
            audformat.segmented_index(),
        ),
        (
            audformat.segmented_index(),
            False,
            None,
            None,
            audformat.segmented_index(),
        ),
        # allow nat
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pd.NaT],
            ),
            True,
            None,
            None,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pd.NaT],
            ),
        ),
        # forbid nat
        (
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0, 0],
                [pytest.FILE_DUR, pytest.FILE_DUR],
            ),
        ),
        (
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pd.NaT],
            ),
            False,
            None,
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [0.2, pytest.FILE_DUR],
            ),
        ),
        # provide file durations
        (
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [pd.NaT, pd.NaT],
            ),
            False,
            {
                os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
                pytest.FILE_DUR * 2,
            },
            pytest.DB_ROOT,
            audformat.segmented_index(
                pytest.DB.files[:2],
                [0.1, 0.5],
                [pytest.FILE_DUR, pytest.FILE_DUR * 2],
            ),
        ),
        # file not found
        pytest.param(
            audformat.filewise_index(pytest.DB.files[:2]),
            False,
            None,
            None,
            None,
            marks=pytest.mark.xfail(raises=FileNotFoundError),
        ),
        # series and frame
        (
            pd.DataFrame(
                {'int': [1, 2], 'str': ['a', 'b']},
                index=audformat.filewise_index(pytest.DB.files[:2]),
            ),
            True,
            None,
            None,
            audformat.segmented_index(pytest.DB.files[:2]),
        ),
    ],
)
def test_to_segmented_index(obj, allow_nat, files_duration, root, expected):
    result = audformat.utils.to_segmented_index(
        obj,
        allow_nat=allow_nat,
        files_duration=files_duration,
        root=root,
    )
    if not isinstance(result, pd.Index):
        result = result.index
    pd.testing.assert_index_equal(result, expected)
    if files_duration and not allow_nat:
        # for filewise tables we expect a duration for every file,
        # for segmented only where end == NaT
        files = result.get_level_values(audformat.define.IndexField.FILE)
        if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED:
            mask = result.get_level_values(
                audformat.define.IndexField.END
            ) == pd.NaT
            files = files[mask]
        for file in files:
            file = os.path.join(root, file)
            assert file in files_duration


@pytest.mark.parametrize(
    'output_folder,table_id,expected_file_names',
    [
        pytest.param(
            '.',
            'segments',
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
        pytest.param(
            os.path.abspath(''),
            'segments',
            None,
            marks=pytest.mark.xfail(raises=ValueError),
        ),
    ],
)
def test_to_filewise(output_folder, table_id, expected_file_names):
    has_existed = os.path.exists(output_folder)
    frame = utils.to_filewise_index(
        obj=pytest.DB[table_id].get(),
        root=pytest.DB_ROOT,
        output_folder=output_folder,
        num_workers=3,
    )
    assert audformat.index_type(frame) == define.IndexType.FILEWISE
    pd.testing.assert_frame_equal(
        pytest.DB[table_id].get().reset_index(drop=True),
        frame.reset_index(drop=True),
    )
    files = frame.index.get_level_values(define.IndexField.FILE).values
    if table_id == 'segmented':
        # frame is unprocessed
        assert os.path.isabs(output_folder) == os.path.isabs(files[0])
    if table_id == 'files':
        # files of unprocessed frame are relative to `root`
        files = [os.path.join(pytest.DB_ROOT, f) for f in files]
    assert all(os.path.exists(f) for f in files)
    file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files]
    assert file_names == expected_file_names
    # clean-up
    if not has_existed:
        # output folder was created
        if os.path.exists(output_folder):
            shutil.rmtree(output_folder)
    else:
        if table_id == 'segments':
            for f in frame.index.get_level_values(define.IndexField.FILE):
                if os.path.exists(f):
                    os.remove(f)


@pytest.mark.parametrize(
    'objs, expected',
    [
        (
            [],
            audformat.filewise_index(),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.filewise_index(['f1', 'f2', 'f3']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2']),
                audformat.segmented_index(['f3', 'f4']),
            ],
            audformat.segmented_index(['f1', 'f2', 'f3', 'f4']),
        ),
        (
            [
                audformat.filewise_index(['f1', 'f2']),
                audformat.segmented_index(),
            ],
            audformat.segmented_index(['f1', 'f2']),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
            ],
            audformat.segmented_index(
                ['f1', 'f2', 'f2', 'f3'],
                [0, 0, 1, 1],
                [1, 1, 2, 2],
            ),
        ),
        (
            [
                audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
                audformat.filewise_index(['f1', 'f2']),
                audformat.filewise_index(['f2', 'f3']),
            ],
            audformat.segmented_index(
                ['f1', 'f1', 'f2', 'f2', 'f3'],
                [0, 0, 0, 0, 0],
                [pd.NaT, 1, pd.NaT, 1, 1],
            ),
        ),
    ],
)
def test_union(objs, expected):
    pd.testing.assert_index_equal(
        audformat.utils.union(objs),
        expected,
    )
"'f3']), ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ) ),", "'f2']), name='c1', ), pd.Series( [2.], audformat.filewise_index('f2'), name='c2', ), ], False, pd.DataFrame( { 'c1':", "audformat.filewise_index('f2'), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(), ), (", "], audformat.filewise_index(), ), ( [ audformat.filewise_index(), audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(['f1',", "not found pytest.param( audformat.filewise_index(pytest.DB.files[:2]), False, None, None, None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # series and", "] ) def test_hash(obj, expected): assert utils.hash(obj) == expected assert utils.hash(obj[::-1]) == expected", "pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float32', ), pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float64',", "dtype='float64', ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), dtype='float32', ), pd.Series(", "[{'a': 0}, {'b': 1, 'c': 2}], {'a': 0, 'b': 1, 'c': 2}, ),", "# combine values in same location ( [ pd.Series([np.nan], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ],", "'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ),", "], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0, 0, 0, 0, 0], [pd.NaT,", "dtype ( [ pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ), pd.Series( [1, 2],", "dtype='object') ), ( [pd.Series([], audformat.segmented_index(), dtype='object')], False, pd.Series([], audformat.segmented_index(), dtype='object') ), ( [pd.DataFrame([],", "'f2']), dtype='Int64', ), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ),", "audformat.filewise_index(['f1', 'f2']), name='c', ), pd.DataFrame( { 'c': [2., 3.] 
}, audformat.filewise_index(['f2', 'f3']), ),", "'f2']), audformat.segmented_index(), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1',", "), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(['f1',", "( audformat.segmented_index(), '0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215',", "audformat.filewise_index(), ), ( [ audformat.filewise_index(), ], audformat.filewise_index(), ), ( [ audformat.filewise_index(), audformat.filewise_index(), ],", "'f2.mp3']), ), ( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']),", "name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame( { 'c1': [1.], 'c2': [2.], },", "0, 0, 0], [pd.NaT, 1, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'],", "'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2',", "* 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ),", "'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1', 'value2'], ), ),", "'f3'], [1, 1], [2, 2]), ], audformat.segmented_index( ['f1', 'f2', 'f2', 'f3'], [0, 0,", "[1, 1]), audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1,", "), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ) ),", "), name='value', ), ), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''), pd.MultiIndex.from_arrays( [ ['f1', 'f1',", "}, audformat.segmented_index( ['f1', 'f1'], [0, 0], [None, 1], ), ), ), # combine", "['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ),", "audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), # file not", "audformat.filewise_index(['f2', 'f3']), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']),", "1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ), ] ) def", "@pytest.mark.parametrize( 'output_folder,table_id,expected_file_names', [ pytest.param( '.', 'segments', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( os.path.abspath(''), 'segments', None,", "overwrite values ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], True, pd.Series([1.], audformat.filewise_index('f1')), ),", "assert utils.hash(obj[::-1]) == expected @pytest.mark.parametrize( 'objs, expected', [ ( [], audformat.filewise_index(), ), (", "False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pytest.FILE_DUR], ), ), # provide", "expected) else: pd.testing.assert_frame_equal(obj, expected) @pytest.mark.parametrize( 'obj, expected_duration', [ ( audformat.segmented_index(), pd.Timedelta(0, unit='s'), ),", "[ 
('en', 'eng'), ('en', 'eng'), ('english', 'eng'), ('English', 'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError)", "range(1, 11) for j in range(10) ] ), ( 'tmp', 'files', [str(i).zfill(3) for", "audformat.Database('db1') scheme1 = audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] = scheme1 audformat.utils.join_schemes([db1], 'scheme_id') assert db1.schemes['scheme_id']", "0.0], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pd.NaT,", "), pd.Series( [2.], audformat.filewise_index('f2'), name='c2', ), ], False, pd.DataFrame( { 'c1': [1., 2.],", "[ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index(", "], False, pd.Series([np.nan], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], False,", "[0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, pd.NaT], ), ), ]", "'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age': 20}}, {'0': {'age': 30}}], [],", "'f2.wav']), 'mp3', None, audformat.filewise_index(['f1.mp3', 'f2.mp3']), ), ( audformat.segmented_index(['f1.wav', 'f2.wav']), 'mp3', None, audformat.segmented_index(['f1.mp3', 'f2.mp3']),", "pandas as pd import pytest import audeer import audformat from audformat import utils", "audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), ( audformat.segmented_index( pytest.DB.files[:2],", "pytest.FILE_DUR], ), ), # provide file durations ( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):", ") def test_replace_file_extension(index, extension, pattern, expected_index): index = audformat.utils.replace_file_extension( index, extension, pattern=pattern, )", "different names ( [ pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame(", "), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where',", "index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '0s', '0s'], ends=['1s', '2s', '3s'], ), name='value', ),", "np.nan, 'd'], audformat.filewise_index(['f1', 'f2', 'f4']), name='c2', ), pd.DataFrame( { 'c1': [np.nan, 3.], 'c2':", "] ), ), ( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')),", "expected', [ ( [], [], ), ( (['a'], ['b']), ['a', 'b'], ), (", "One database db1 = audformat.Database('db1') scheme1 = audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] = scheme1", "audformat.segmented_index(), ), ( audformat.segmented_index(), True, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), False, None,", "None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), True, None,", "( [ audformat.filewise_index(), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2'],", "clean-up if not has_existed: # output folder was created and can be removed", "columns=['value1', 'value2'], ), 
), ( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0],", "'f2']), dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True,", "0.5], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), # file not found pytest.param( audformat.filewise_index(pytest.DB.files[:2]),", "file in files: file = os.path.join(root, file) assert file in files_duration @pytest.mark.parametrize( 'output_folder,table_id,expected_file_names',", "audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ],", "f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '0s', '0s'],", "'-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ]", "1, 'c': 2}, ), ( [{'a': 0, 'b': 1}, {'b': 1, 'c': 2}],", "'c1': [1., np.nan], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [", "segmented only where end == NaT files = result.get_level_values(audformat.define.IndexField.FILE) if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED:", "1]), audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),", "'2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end f1,00:00:00,00:00:01 f1,00:00:01,00:00:02 f2,00:00:02,00:00:03'''), pd.MultiIndex.from_arrays( [", "0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index('f2',", "[{'a': 0, 'b': 1}, {'b': 1, 'c': 2}], {'a': 0, 'b': 1, 'c':", "columns=['value1', 'value2'], ), ), pytest.param( StringIO('''value 0.0 1.0 2.0'''), None, marks=pytest.mark.xfail(raises=ValueError) ) ])", "audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(),", "( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2],", "2.], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.,", "'f2', 'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(),", "file not found pytest.param( audformat.filewise_index(pytest.DB.files[:2]), False, None, None, None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # series", "'scheme_id') @pytest.mark.parametrize( 'language, expected', [ ('en', 'eng'), ('en', 'eng'), ('english', 'eng'), ('English', 'eng'),", "( audformat.filewise_index(), True, None, None, audformat.segmented_index(), ), ( audformat.filewise_index(), False, None, None, audformat.segmented_index(),", "isinstance(result, pd.Index): result = result.index pd.testing.assert_index_equal(result, expected) if files_duration and not allow_nat: #", "audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), 
audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2',", "pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.segmented_index('f2')), ], False,", "'f2'])), ), ( [ pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ],", "1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "f in files] assert file_names == expected_file_names # clean-up if not has_existed: #", "'f2']), os.path.join('some', 'where'), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ),", "[2., 3.] }, audformat.filewise_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c': [1., 2.,", "'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1',", "pd.NaT, 1, pd.NaT], ), ), ] ) def test_union(objs, expected): pd.testing.assert_index_equal( audformat.utils.union(objs), expected,", "'scheme_id') assert db1.schemes['scheme_id'] == expected assert db2.schemes['scheme_id'] == expected # Three database db3", "audformat.utils.join_schemes([db1, db2, db3], 'scheme_id') # Fail for schemes without labels with pytest.raises(ValueError): db", "[0.1, 0.5], [0.2, pd.NaT], ), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2,", "'f2', 'f3'], [0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, pd.NaT], ),", "['a', 'b', 'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ),", "'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'],", "file durations ( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT,", "audformat.segmented_index('f2', 0, 1), ), ] ) def test_intersect(objs, expected): pd.testing.assert_index_equal( audformat.utils.intersect(objs), expected, )", "audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3', None, audformat.filewise_index(['f1',", "index = audformat.utils.replace_file_extension( index, extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration,", "0.5], [0.2, pd.NaT], ), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pytest.FILE_DUR],", "== audformat.define.IndexType.SEGMENTED: mask = result.get_level_values( audformat.define.IndexField.END ) == pd.NaT files = files[mask] for", "index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2',", "( [ pd.Series( 1, audformat.filewise_index('f1'), dtype='int64', ), pd.Series( 2, audformat.filewise_index('f2'), dtype='int64', ), ],", "durations ( audformat.filewise_index(pytest.DB.files[:2]), False, { os.path.join(pytest.DB_ROOT, pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index(", "[ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 
'where', 'f2')), ] ), ), ( audformat.filewise_index( [", "[0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]), ], audformat.segmented_index( ['f1',", "audformat.segmented_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ], ['1s', '3s'], ['2s', '4s'], ), ) ] )", ") def test_to_filewise(output_folder, table_id, expected_file_names): has_existed = os.path.exists(output_folder) frame = utils.to_filewise_index( obj=pytest.DB[table_id].get(), root=pytest.DB_ROOT,", "), ( audformat.filewise_index(['f1', 'f2']), audeer.safe_path(os.path.join('some', 'where')), audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where',", "else: assert duration == expected_duration @pytest.mark.parametrize( 'index, root, expected', [ ( audformat.filewise_index(), None,", "dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool', ), pd.Series( True, audformat.filewise_index('f2'),", "frame.index.get_level_values(define.IndexField.FILE).values if table_id == 'segmented': # already `framewise` frame is unprocessed assert os.path.isabs(output_folder)", "'f3']), ), ], False, pd.DataFrame( { 'c1': [1., 2., 3., np.nan], 'c2': ['a',", "audformat.filewise_index('f2'), name='c2', ), ], False, pd.DataFrame( { 'c1': [1., 2.], 'c2': [np.nan, 2.],", "overwrite=overwrite) if isinstance(obj, pd.Series): pd.testing.assert_series_equal(obj, expected) else: pd.testing.assert_frame_equal(obj, expected) @pytest.mark.parametrize( 'obj, expected_duration', [", "files_duration, root, expected', [ # empty ( audformat.filewise_index(), True, None, None, audformat.segmented_index(), ),", "expected): expanded_index = audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index, expected) @pytest.mark.parametrize( 'obj, expected', [ ( audformat.filewise_index(),", "pd.DataFrame( {'a': [0, 1], 'b': [2, 3]}, audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),", "frame are relative to `root` files = [os.path.join(pytest.DB_ROOT, f) for f in files]", "), ( pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])), pd.Timedelta(1, unit='s'), ), # filewise index, but file", "'f3']), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(), audformat.segmented_index(), ],", "), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0],", "2]), ], audformat.segmented_index( ['f1', 'f2', 'f2', 'f3'], [0, 0, 1, 1], [1, 1,", "pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=pd.to_timedelta([pd.NaT, pd.NaT,", "values with matching dtype ( [ pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ),", "['b', 'c'], }, audformat.segmented_index(['f2', 'f3']), ), ], False, pd.DataFrame( { 'c1': [1., 2.,", "'b', 'c'), [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{'a': 0, 'b': 1}, ['c']], [], marks=pytest.mark.xfail(raises=ValueError),", "table_id == 'segments': for f in frame.index.get_level_values( define.IndexField.FILE): if os.path.exists(f): os.remove(f) @pytest.mark.parametrize( 'objs,", "pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index( pytest.DB.files[:2], [0.1,", "pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pytest.FILE_DUR, pytest.FILE_DUR * 
2],", "), ), ( StringIO('''file,value1,value2 f1,0.0,a f2,1.0,b f3,2.0,c'''), pd.DataFrame( { 'value1': [0.0, 1.0, 2.0],", "dtype='object'), ), ( [pd.Series([], audformat.filewise_index(), dtype='object')], False, pd.Series([], audformat.filewise_index(), dtype='object') ), ( [pd.Series([],", "f in files] assert all(os.path.exists(f) for f in files) file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0]", "'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), ( audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ]", "'f3'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2',", "'f2', 'f3'], [0, 0, 0, 0, 0], [pd.NaT, 1, pd.NaT, 1, 1], ),", "), # combine series and overwrite values ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')),", "False, pd.Series([], audformat.filewise_index(), dtype='object'), ), ( [pd.Series([], audformat.filewise_index(), dtype='object')], False, pd.Series([], audformat.filewise_index(), dtype='object')", "'c2': ['a', 'b', 'c', 'd'] }, audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ), ), #", "audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index( ['f1', 'f2', 'f3'], [0, 0,", "['f1', 'f2', 'f3'], name='file', ), ), ( StringIO('''file,value f1,0.0 f2,1.0 f3,2.0'''), pd.Series( [0.0,", "audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3',", "[0, 0], [1, 1]), ], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'],", "marks=pytest.mark.xfail(raises=ValueError), ), # error: values do not match pytest.param( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.],", "[ ( StringIO('''file f1 f2 f3'''), pd.Index( ['f1', 'f2', 'f3'], name='file', ), ),", "), columns=['value1', 'value2'], ), ), pytest.param( StringIO('''value 0.0 1.0 2.0'''), None, marks=pytest.mark.xfail(raises=ValueError) )", "[0.1, 0.5], [0.2, pytest.FILE_DUR], ), ), # provide file durations ( audformat.filewise_index(pytest.DB.files[:2]), False,", "pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root, expected', [ # empty", "[], [], ), ( (['a'], ['b']), ['a', 'b'], ), ( (['a'], ['b', 'c']),", "'f2', 'f3']), dtype='category', ), pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ),", "'2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b f2,00:00:02,00:00:03,2.0,c'''), pd.DataFrame( {", "'f2']), audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0, 1), ), ] ) def test_intersect(objs, expected):", "audformat.utils.join_schemes([db1, db2], 'scheme_id') assert db1.schemes['scheme_id'] == expected assert db2.schemes['scheme_id'] == expected # Three", "), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1',", "), ), pytest.param( StringIO('''value 0.0 1.0 2.0'''), None, marks=pytest.mark.xfail(raises=ValueError) ) ]) def test_read_csv(csv,", "for every file # for segmented only where end == NaT files =", "segmented index with NaT, but file is missing pytest.param( 
audformat.segmented_index(['f1'], [0]), None, marks=pytest.mark.xfail(raises=FileNotFoundError),", "'eng'), ('english', 'eng'), ('English', 'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'xxx', None,", "['a', 'b']}, index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ] ) def test_to_segmented_index(obj,", "extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root, expected', [ #", "audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), (", "2.], }, audformat.segmented_index( ['f1', 'f1'], [0, 0], [None, 1], ), ), ), #", "None, None, audformat.segmented_index(), ), ( audformat.segmented_index(), True, None, None, audformat.segmented_index(), ), ( audformat.segmented_index(),", "'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(), ), ( [ audformat.segmented_index(), ], audformat.segmented_index(), ), ( [", "os.path.sep, audformat.filewise_index( [ audeer.safe_path(os.path.join('some', 'where', 'f1')), audeer.safe_path(os.path.join('some', 'where', 'f2')), ] ), ), (", "2], audformat.filewise_index(['f1', 'f2'])), pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series( [1, 2], audformat.filewise_index(['f1',", "pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series( ['a', 'b',", "expected', [ ( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(), ), (", "), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']), ),", "f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '0s', '0s'], ends=['1s',", "f1,00:00:01,0.0 f1,00:00:02,1.0 f2,00:00:03,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '0s',", "], audformat.segmented_index(), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1',", "pd.Series([1.], audformat.filewise_index('f1')), pd.Series([np.nan], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')),", "files = [os.path.join(pytest.DB_ROOT, f) for f in files] assert all(os.path.exists(f) for f in", "), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2',", "( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), 'mp3', None, audformat.filewise_index(['f1.mp3', 'f2.mp3']),", "pytest.param( ['a', 'b', 'c'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( ('a', 'b', 'c'), [], marks=pytest.mark.xfail(raises=ValueError),", "audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f2',", "'f2.wav']), 'mp3', None, audformat.segmented_index(['f1.mp3', 'f2.mp3']), ), ( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']),", "['b', 
'c'], 'd'], [], marks=pytest.mark.xfail(raises=ValueError), ), pytest.param( [{0: {'age': 20}}, {'0': {'age': 30}}],", "utils.map_language(language) == expected @pytest.mark.parametrize('csv,result', [ ( StringIO('''file f1 f2 f3'''), pd.Index( ['f1', 'f2',", "'3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a f1,00:00:01,00:00:02,1.0,b f2,00:00:02,00:00:03,2.0,c'''), pd.DataFrame( { 'value1':", "expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]}) audformat.utils.join_schemes([db1, db2], 'scheme_id') assert db1.schemes['scheme_id'] ==", "for f in files) file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files] assert", "audeer.safe_path('f1'), audeer.safe_path('f2'), ], ['1s', '3s'], ['2s', '4s'], ), ) ] ) def test_expand_file_path(tmpdir,", "'f2']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0, 0, 0, 0, 0],", "[2]), dtype='category', ), pd.Timedelta(1, unit='s'), ), ( pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])), pd.Timedelta(1, unit='s'), ),", "), ), ( StringIO('''file,start,value f1,00:00:00,0.0 f1,00:00:01,1.0 f2,00:00:02,2.0'''), pd.Series( [0.0, 1.0, 2.0], index=audformat.segmented_index( ['f1',", "'f4']), ], audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0],", "'f2')), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where') + os.path.sep, audformat.filewise_index( [", "starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), name='value', ), ), ( StringIO('''file,start,end,value1,value2 f1,00:00:00,00:00:01,0.0,a", "'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index(", "( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1,", "with pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id') @pytest.mark.parametrize( 'language, expected',", "pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), True, None, None, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5],", "= audformat.utils.duration(obj) if pd.isnull(expected_duration): assert pd.isnull(duration) else: assert duration == expected_duration @pytest.mark.parametrize( 'index,", "2.0'''), None, marks=pytest.mark.xfail(raises=ValueError) ) ]) def test_read_csv(csv, result): obj = audformat.utils.read_csv(csv) if isinstance(result,", "'f1', 'f2'], starts=['0s', '0s', '0s'], ends=['1s', '2s', '3s'], ), name='value', ), ), (", "dtype='Int64', ), ), # combine series with different names ( [ pd.Series([1.], audformat.filewise_index('f1'),", "[1, 2], index=audformat.filewise_index(pytest.DB.files[:2]), ), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int': [1,", "audformat.filewise_index('f2')), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.segmented_index('f1')), pd.Series([2.],", "pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), ], False, pd.Series( ['a', 'b',", "}, index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), columns=['value1',", "dtype='category', ), ], False, None, marks=pytest.mark.xfail(raises=ValueError), ), # error: values do not match", "'4s'], ), '.', audformat.segmented_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ], 
['1s', '3s'], ['2s', '4s'], ),", "True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( pd.DataFrame( {'int': [1, 2], 'str': ['a', 'b']},", "'mp3', None, audformat.segmented_index(['f1.mp3', 'f2.mp3']), ), ( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ),", "[ pd.Series( [1, 2, 3], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series( ['a', 'b', 'a'],", "[1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64' ), ), ( [ pd.Series( True, audformat.filewise_index('f1'), dtype='bool',", "'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ) ), ( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1',", "), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']),", "audformat.segmented_index(), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.segmented_index(), ], audformat.segmented_index(['f1', 'f2']), ), ( [", "audformat.filewise_index('f1'), dtype='int64', ), pd.Series( 2, audformat.filewise_index('f2'), dtype='int64', ), ], False, pd.Series( [1, 2],", "pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pytest.FILE_DUR, pytest.FILE_DUR * 2], ), ), # file", "audformat.segmented_index(), '0', ), ( audformat.filewise_index(['f1', 'f2']), '-4231615416436839963', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ),", "matching dtype ( [ pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ), pd.Series( [1,", "f'_{j}' for i in range(1, 11) for j in range(10) ] ), (", "'f3']), ), ), ( [ pd.Series( [1., 2.], audformat.filewise_index(['f1', 'f2']), name='c1', ), pd.Series(", "), ( audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), '-3831446135233514455', ), ( pd.Series([0, 1],", "audformat.utils.expand_file_path(index, root) pd.testing.assert_index_equal(expanded_index, expected) @pytest.mark.parametrize( 'obj, expected', [ ( audformat.filewise_index(), '0', ), (", "0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f1'],", "( audformat.filewise_index(pytest.DB.files[:2]), True, None, None, audformat.segmented_index(pytest.DB.files[:2]), ), ( audformat.segmented_index(pytest.DB.files[:2]), True, None, None, audformat.segmented_index(pytest.DB.files[:2]),", "scheme2 = audformat.Scheme(labels={'b': [3]}) db2.schemes['scheme_id'] = scheme2 expected = audformat.Scheme(labels={'a': [1, 2], 'b':", "has_existed: # output folder was created and can be removed if os.path.exists(output_folder): shutil.rmtree(output_folder)", "), ( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3',", "0}], {'a': 0}, ), ( [{'a': 0}, {'b': 1}], {'a': 0, 'b': 1},", "filewise index, but file is missing pytest.param( audformat.filewise_index(['f1']), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), # segmented", "result = result.index pd.testing.assert_index_equal(result, expected) if files_duration and not allow_nat: # for filewise", "schemes without labels with pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db], 'scheme_id')", "audformat.index_type(frame) == define.IndexType.FILEWISE pd.testing.assert_frame_equal( pytest.DB[table_id].get().reset_index(drop=True), 
frame.reset_index(drop=True), ) files = frame.index.get_level_values(define.IndexField.FILE).values if table_id ==", "pd.Series([1.], audformat.filewise_index('f1')), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([1.], audformat.filewise_index('f1')), ], False, pd.Series([1.], audformat.filewise_index('f1')),", "'objs, overwrite, expected', [ # empty ( [], False, pd.Series([], audformat.filewise_index(), dtype='object'), ),", "[f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files] assert file_names == expected_file_names # clean-up if", "dtype='category', ) ), # combine series with non-nullable dtype ( [ pd.Series([1, 2],", "audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2']), '-2363261461673824215', ), ( audformat.segmented_index(['f1', 'f2'], [0,", "'f3']), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(['f1', 'f2', 'f3']),", "'eng'), pytest.param( 'xx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'xxx', None, marks=pytest.mark.xfail(raises=ValueError) ), pytest.param( 'Bad", "'f2'], [0, 0], [1, 1]), ), '-103439349488189352', ), ] ) def test_hash(obj, expected):", "for schemes without labels with pytest.raises(ValueError): db = audformat.Database('db') db.schemes['scheme_id'] = audformat.Scheme('str') audformat.utils.join_schemes([db],", "3., np.nan], 'c2': ['a', 'b', 'c', 'd'] }, audformat.segmented_index(['f1', 'f2', 'f3', 'f4']), ),", "[0.2, pd.NaT], ), ), # forbid nat ( audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index(", "( audformat.filewise_index(['f1.ogg', 'f2.wav']), 'mp3', '.ogg', audformat.filewise_index(['f1.mp3', 'f2.wav']), ), ] ) def test_replace_file_extension(index, extension,", "False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')), pd.Series([2.], audformat.filewise_index('f2')), ],", "[np.nan, 3.], 'c2': ['b', 'c'], }, audformat.segmented_index(['f2', 'f3']), ), ], False, pd.DataFrame( {", "['f1', 'f1', 'f2', 'f3'], [0, 0, 0, 0], [pd.NaT, 1, 1, 1], ),", "[1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1', 'f1',", "], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'],", "pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ), ), ( audformat.segmented_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index(", "audformat.filewise_index(['f1', 'f2'])), ], False, pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])), ), ( [ pd.Series([1.], audformat.filewise_index('f1')),", "pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0.0, 0.0], [pytest.FILE_DUR, pytest.FILE_DUR *", "f in files) file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files] assert file_names", "audformat.segmented_index(), ), ( audformat.segmented_index(), False, None, None, audformat.segmented_index(), ), # allow nat (", "nat ( audformat.filewise_index(pytest.DB.files[:2]), False, None, pytest.DB_ROOT, audformat.segmented_index( pytest.DB.files[:2], [0, 0], [pytest.FILE_DUR, pytest.FILE_DUR] ),", "audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), 
audformat.filewise_index(['f2', 'f3']), ], audformat.segmented_index('f2', 0,", "0], [pd.NaT, 1, 1, 1], ), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0],", "audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [0.2, pd.NaT], ), ), # forbid nat ( audformat.filewise_index(pytest.DB.files[:2]),", "'value2': ['a', 'b', 'c'], }, index=audformat.segmented_index( ['f1', 'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s',", "'f1', 'f2'], starts=['0s', '1s', '2s'], ends=['1s', '2s', '3s'], ), name='value', ), ), (", "[2.], audformat.filewise_index('f2'), name='c2', ), ], False, pd.DataFrame( { 'c1': [1., 2.], 'c2': [np.nan,", "( [ pd.Series([1.], audformat.filewise_index('f1'), name='c1'), pd.Series([2.], audformat.filewise_index('f1'), name='c2'), ], False, pd.DataFrame( { 'c1':", "pd.Series( [1.], audformat.filewise_index('f1'), name='c1'), pd.Series( [2.], audformat.segmented_index('f1', 0, 1), name='c2', ), ], False,", "= audformat.utils.replace_file_extension( index, extension, pattern=pattern, ) pd.testing.assert_index_equal(index, expected_index) @pytest.mark.parametrize( 'obj, allow_nat, files_duration, root,", "# already `framewise` frame is unprocessed assert os.path.isabs(output_folder) == os.path.isabs(files[0]) if table_id ==", "{'age': 20}}, {'0': {'age': 30}}], [], marks=pytest.mark.xfail(raises=ValueError), ), ] ) def test_join_labels(labels, expected):", "1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index('f2', 0, 1),", "( pd.Series( index=audformat.segmented_index(['f1'], [1], [2]), dtype='category', ), pd.Timedelta(1, unit='s'), ), ( pd.DataFrame(index=audformat.segmented_index(['f1'], [1],", "( audformat.filewise_index(['f1.WAV', 'f2.WAV']), 'MP3', None, audformat.filewise_index(['f1.MP3', 'f2.MP3']), ), ( audformat.filewise_index(['f1', 'f2.wv']), 'mp3', None,", "[1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), ], audformat.segmented_index('f2', 0, 1), ),", "'index, root, expected', [ ( audformat.filewise_index(), None, audformat.filewise_index(), ), ( audformat.segmented_index(), None, audformat.segmented_index(),", "'f2']), dtype='int64', ), pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='Int64', ), ], False, pd.Series(", "'f2']), '.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some',", ") def test_duration(obj, expected_duration): duration = audformat.utils.duration(obj) if pd.isnull(expected_duration): assert pd.isnull(duration) else: assert", "'.', audformat.filewise_index( [ audeer.safe_path('f1'), audeer.safe_path('f2'), ] ), ), ( audformat.filewise_index(['f1', 'f2']), os.path.join('some', 'where'),", "'f1'], [0, 0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]), ], audformat.segmented_index(", "== define.IndexType.FILEWISE pd.testing.assert_frame_equal( pytest.DB[table_id].get().reset_index(drop=True), frame.reset_index(drop=True), ) files = frame.index.get_level_values(define.IndexField.FILE).values if table_id == 'segmented':", "if table_id == 'files': # files of unprocessed frame are relative to `root`", "as np import pandas as pd import pytest import audeer import audformat from", "audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0, 0, 0, 0,", "], False, pd.DataFrame( { 'c1': [1., 2.], 'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']),", "), pd.Series( ['a', 
'b', 'c'], index=audformat.filewise_index(['f1', 'f2', 'f3']), dtype='category', ), ], False, None,", "] ) def test_replace_file_extension(index, extension, pattern, expected_index): index = audformat.utils.replace_file_extension( index, extension, pattern=pattern,", "= result.index pd.testing.assert_index_equal(result, expected) if files_duration and not allow_nat: # for filewise tables", "audformat.segmented_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f1', 'f2']), ], audformat.segmented_index(['f1',", "test_duration(obj, expected_duration): duration = audformat.utils.duration(obj) if pd.isnull(expected_duration): assert pd.isnull(duration) else: assert duration ==", "== NaT files = result.get_level_values(audformat.define.IndexField.FILE) if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED: mask = result.get_level_values( audformat.define.IndexField.END", "{'b': 2, 'c': 2}], {'a': 0, 'b': 2, 'c': 2}, ), ( [{'a':", "pytest.param( [ pd.Series( [1, 2, 3], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series( ['a', 'b',", "all(os.path.exists(f) for f in files) file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files]", "None, audformat.filewise_index(), ), ( audformat.segmented_index(), 'mp3', None, audformat.segmented_index(), ), ( audformat.filewise_index(['f1.wav', 'f2.wav']), 'mp3',", "[ audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f1'), audeer.safe_path(os.path.join('some', 'where')) + os.path.sep + audeer.safe_path('f2'),", "], audformat.filewise_index('f2'), ), ( [ audformat.filewise_index(['f1', 'f2']), audformat.filewise_index(['f1', 'f2']), audformat.filewise_index('f3'), ], audformat.filewise_index(), ),", "0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f3'], [0, 0, 0,", "'f2']), ], audformat.segmented_index('f2', 0, 1), ), ( [ audformat.segmented_index(['f1', 'f2'], [0, 0], [1,", "= audformat.Scheme(labels={'a': [1, 2]}) db1.schemes['scheme_id'] = scheme1 audformat.utils.join_schemes([db1], 'scheme_id') assert db1.schemes['scheme_id'] == scheme1", "we expect a duration for every file # for segmented only where end", "with matching dtype ( [ pd.Series( [1, 2], audformat.filewise_index(['f1', 'f2']), dtype='int64', ), pd.Series(", "0], [1, 1]), audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]), audformat.filewise_index('f1'), ], audformat.segmented_index( ['f1',", "audformat.segmented_index( ['f1', 'f2', 'f3'], [0, 0, 0], [1, 1, 1], ), ), (", "'c2': [np.nan, 2.], }, audformat.filewise_index(['f1', 'f2']), ), ), ( [ pd.Series( [1., 2.],", "1]), audformat.filewise_index(['f1', 'f2']), ], audformat.segmented_index( ['f1', 'f1', 'f2', 'f2', 'f3'], [0, 0, 0,", "# filewise index, but file is missing pytest.param( audformat.filewise_index(['f1']), None, marks=pytest.mark.xfail(raises=FileNotFoundError), ), #", "pd.Index): pd.testing.assert_index_equal(obj, result) elif isinstance(result, pd.Series): pd.testing.assert_series_equal(obj, result) else: pd.testing.assert_frame_equal(obj, result) @pytest.mark.parametrize( 'index,", "[0, 0, 0, 0], [pd.NaT, 1, 1, 1], ), ), ( [ audformat.segmented_index(['f1',", "], audformat.segmented_index(['f1', 'f2']), ), ( [ audformat.segmented_index(['f1', 'f2']), audformat.segmented_index(['f3', 'f4']), ], audformat.segmented_index(), ),", "pytest.DB.files[1]): pytest.FILE_DUR * 2, }, pytest.DB_ROOT, 
audformat.segmented_index( pytest.DB.files[:2], [0.1, 0.5], [pytest.FILE_DUR, pytest.FILE_DUR *", "), pytest.param( [ pd.Series( ['a', 'b', 'a'], index=audformat.filewise_index(['f1', 'f2', 'f3']), ), pd.Series( ['a',", "1, 2, 2], ), ), ( [ audformat.filewise_index(), audformat.segmented_index(), ], audformat.segmented_index(), ), (" ]
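The n-gram fragments in the list above are shingles of a pytest suite for the audformat.utils index helpers (union, intersect, concat, hash, to_segmented_index, duration, read_csv, and related functions). As a minimal, self-contained sketch of the behaviour those fragments describe — using only calls that appear verbatim in them (audformat.filewise_index, audformat.utils.union, audformat.utils.intersect, pd.testing.assert_index_equal) and not the original test file itself — one union/intersect case can be reconstructed like this:

    import audformat
    from audformat import utils  # noqa: F401  (mirrors the imports visible in the fragments)
    import pandas as pd

    # Two filewise indices that overlap in file 'f2'
    index1 = audformat.filewise_index(['f1', 'f2'])
    index2 = audformat.filewise_index(['f2', 'f3'])

    # union() keeps every file that occurs in at least one index
    pd.testing.assert_index_equal(
        audformat.utils.union([index1, index2]),
        audformat.filewise_index(['f1', 'f2', 'f3']),
    )

    # intersect() keeps only the files present in all indices
    pd.testing.assert_index_equal(
        audformat.utils.intersect([index1, index2]),
        audformat.filewise_index(['f2']),
    )

The fragments additionally cover the mixed case: when filewise and segmented indices are combined, the expected results shown above are built with audformat.segmented_index, i.e. the combined index is segmented.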
[ "not new_attachments: return [] # no attachments # clean existing attachments for attachment", "don't have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment':", "\\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename} ) if new_attachments: self.update_attachments", "validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if", ") return list(queryset) def save(self): if not self.update_attachments: return if self.removed_attachments: for attachment", "import AttachmentSerializer from . import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size'])", "bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post,", "ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self):", "return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if", "user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if not self.update_attachments: return if", "ids) if not attachments and not new_attachments: return [] # no attachments #", "[] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if not", "save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments", "if not self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id", "not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def", "single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT,", "id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post'])", "self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments: return [] # no attachments", "for a in self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments by id,", "post (added %(show_value)s).\", \"You can't attach more than %(limit_value)s flies to single post", "attachments and not new_attachments: return [] # no attachments # clean existing attachments", "attachment in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None", "ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True 
self.removed_attachments.append(attachment) else: message = _(", "django.utils.translation import ugettext as _ from django.utils.translation import ungettext from misago.acl import add_acl", "by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments]", ") new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments: return []", "serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments =", "a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments = [] if mode ==", "from misago.acl import add_acl from misago.conf import settings from misago.threads.serializers import AttachmentSerializer from", "save(self): if not self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter(", "= ungettext( \"You can't attach more than %(limit_value)s file to single post (added", "'post': self.post, } ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(),", "return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in", "many=True).data for attachment in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache", "%(limit_value)s file to single post (added %(show_value)s).\", \"You can't attach more than %(limit_value)s", "self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments = [] if", "'user': self.user, 'post': self.post, } ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments", "attach more than %(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise", "= list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids): if not ids:", "= False self.removed_attachments = [] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments", "= AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip']", "if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more than %(limit_value)s", "serializers.ValidationError( message % {'attachment': attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments +=", "[] # no attachments # clean existing attachments for attachment in attachments: if", "= serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments", "self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message = _( \"You", "self.removed_attachments.append(attachment) else: message = _( \"You don't have 
permission to remove \\\"%(attachment)s\\\" attachment.\"", "mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments", "a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments = [] if mode", "self.removed_attachments = [] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments(", "PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data,", "else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message = _( \"You don't", "ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments = [] ids = list(set(ids))", "import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer(", "True self.removed_attachments.append(attachment) else: message = _( \"You don't have permission to remove \\\"%(attachment)s\\\"", "new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self,", "new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments = []", "for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if", "self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a", "del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if", "misago.acl import add_acl from misago.conf import settings from misago.threads.serializers import AttachmentSerializer from .", "a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache", "[] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'],", "ungettext from misago.acl import add_acl from misago.conf import settings from misago.threads.serializers import AttachmentSerializer", "attachments for attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']:", "attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message", "attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename} ) if new_attachments: self.update_attachments =", "attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment 
in post.attachments_cache: del attachment['acl'] del attachment['post']", "attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments", "%(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message %", "attachments # clean existing attachments for attachment in attachments: if attachment.pk in ids:", "self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete()", "def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments = [] ids", "= [] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'],", "validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments = [] ids =", "and not new_attachments: return [] # no attachments # clean existing attachments for", "'mode': self.mode, 'user': self.user, 'post': self.post, } ) def save(self, serializer): serializer.save() class", "self.user, 'post': self.post, } ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments =", "reverse=True) def get_initial_attachments(self, mode, user, post): attachments = [] if mode == PostingEndpoint.EDIT:", "post): attachments = [] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments =", "import add_acl from misago.conf import settings from misago.threads.serializers import AttachmentSerializer from . import", "PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={", "True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post):", "list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids)", "} ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def", "misago.threads.serializers import AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return", "self.update_attachments = False self.removed_attachments = [] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids)", "False self.removed_attachments = [] self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments =", "= list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'],", "self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments:", "if not attachments and not new_attachments: return [] # no attachments # clean", "return list(queryset) def save(self): if not self.update_attachments: return if self.removed_attachments: for attachment in", "attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments: # sort final", "def get_initial_attachments(self, mode, user, post): attachments = [] if mode == PostingEndpoint.EDIT: queryset", "add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids): if not ids: return []", "(added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT, 'show_value': total_attachments,", "django.utils.translation import ungettext from misago.acl import add_acl from misago.conf import settings from misago.threads.serializers", "post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext(", "import serializers from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from", "AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments =", "existing attachments for attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if", "\"You don't have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message %", "attachments def get_new_attachments(self, user, ids): if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter(", "post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT:", "def validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You", "if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset)", "post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if not self.update_attachments: return if self.removed_attachments:", "message = _( \"You don't have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise", ") raise serializers.ValidationError( message % {'attachment': 
attachment.filename} ) if new_attachments: self.update_attachments = True", "self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments by id, descending self.final_attachments.sort(key=lambda a:", "data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post, } ) def save(self, serializer):", "= _( \"You don't have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError(", "if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments]", "import ugettext as _ from django.utils.translation import ungettext from misago.acl import add_acl from", "else: message = _( \"You don't have permission to remove \\\"%(attachment)s\\\" attachment.\" )", "than %(limit_value)s file to single post (added %(show_value)s).\", \"You can't attach more than", "attach more than %(limit_value)s file to single post (added %(show_value)s).\", \"You can't attach", "= len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more", "permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename} )", "= [] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user,", "def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user':", "in self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments by id, descending self.final_attachments.sort(key=lambda", "more than %(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError(", "attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in", "a in self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments by id, descending", "[] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments", "= post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids):", "attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not", "self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments)", "= user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if not self.update_attachments: return", "post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache: del", "attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in 
post.attachments_cache: del attachment['acl']", "post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def", "use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user,", "to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value':", "misago.conf import settings from misago.threads.serializers import AttachmentSerializer from . import PostingEndpoint, PostingMiddleware class", ". import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return", "\"You can't attach more than %(limit_value)s file to single post (added %(show_value)s).\", \"You", "self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and not", "descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'],", "self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments: return", "settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more than %(limit_value)s file to single", "in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments: #", "settings from misago.threads.serializers import AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def", "in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache')", "# clean existing attachments for attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment)", "attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk,", "for attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments", "attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment)", "id__in=ids, ) return list(queryset) def save(self): if not self.update_attachments: return if self.removed_attachments: for", "than %(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message", "from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from misago.acl import", "ugettext as _ from django.utils.translation import ungettext from misago.acl import add_acl from misago.conf", "self.update_attachments = True self.removed_attachments.append(attachment) else: message = _( \"You don't have permission to", "def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in", "can't attach more than %(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, )", "self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode,", "add_acl from misago.conf import settings from misago.threads.serializers import AttachmentSerializer from . import PostingEndpoint,", "from misago.threads.serializers import AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self):", "AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else:", "return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post':", "to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename} ) if", "# no attachments # clean existing attachments for attachment in attachments: if attachment.pk", "# sort final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id", "self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments,", "for attachment in post.attachments_cache: del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache =", "else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments >", "can't attach more than %(limit_value)s file to single post (added %(show_value)s).\", \"You can't", "queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return list(queryset) def save(self): if not self.update_attachments:", "_ from django.utils.translation import ungettext from misago.acl import add_acl from misago.conf import settings", "return [] # no attachments # clean existing attachments for attachment in attachments:", "ids): if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, ) return", "{'attachment': attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a:", "ungettext( \"You can't attach more than %(limit_value)s file to single post (added %(show_value)s).\",", "= [] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] )", "def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids):", "queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user,", "== PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments def", "return attachments def get_new_attachments(self, user, ids): if not ids: return [] queryset =", "get_new_attachments(self, user, ids): if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids,", "in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, 
attachments): if attachments: post.attachments_cache =", "= self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments: return [] # no", "message % {'attachment': attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments += new_attachments", "= True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user,", "self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments):", "get_initial_attachments(self, mode, user, post): attachments = [] if mode == PostingEndpoint.EDIT: queryset =", "context={ 'mode': self.mode, 'user': self.user, 'post': self.post, } ) def save(self, serializer): serializer.save()", "(added %(show_value)s).\", \"You can't attach more than %(limit_value)s flies to single post (added", "required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = [] self.final_attachments = []", "list(queryset) def save(self): if not self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments:", "as _ from django.utils.translation import ungettext from misago.acl import add_acl from misago.conf import", "= True self.removed_attachments.append(attachment) else: message = _( \"You don't have permission to remove", "id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments by", "for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments:", "attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments = []", "> settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more than %(limit_value)s file to", "more than %(limit_value)s file to single post (added %(show_value)s).\", \"You can't attach more", "self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments: # sort final attachments", "from misago.conf import settings from misago.threads.serializers import AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware", ") def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self,", "= None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message", "rest_framework import serializers from django.utils.translation import ugettext as _ from django.utils.translation import ungettext", "% {'attachment': attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda", "sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache:", "total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach", "AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode,", "sort final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for", "not attachments and not new_attachments: return [] # no attachments # clean existing", "PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self,", "if self.final_attachments: # sort final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True)", "reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post,", "AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def", "self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment", "message = ungettext( \"You can't attach more than %(limit_value)s file to single post", "class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False self.removed_attachments", "post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids): if", "attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments:", "serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments =", "None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message =", "user, post): attachments = [] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments", ").delete() if self.final_attachments: # sort final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk,", "to single post (added %(show_value)s).\", \"You can't attach more than %(limit_value)s flies to", "get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post, } )", "self.post, } ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False)", "attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments =", "serializer.save() class AttachmentsSerializer(serializers.Serializer): attachments = serializers.ListField(child=serializers.IntegerField(), required=False) def validate_attachments(self, ids): self.update_attachments = False", "in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True", "attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message = _( \"You don't have permission", "remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename} ) if new_attachments:", "self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments", "attachments) return attachments def get_new_attachments(self, user, ids): if not ids: return [] queryset", ").update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = 
AttachmentSerializer(attachments, many=True).data", "self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for", "+= new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def get_initial_attachments(self, mode, user, post): attachments =", "self.mode, 'user': self.user, 'post': self.post, } ) def save(self, serializer): serializer.save() class AttachmentsSerializer(serializers.Serializer):", "no attachments # clean existing attachments for attachment in attachments: if attachment.pk in", "not self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for", "attachments = list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids): if not", "AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post, } ) def save(self,", "new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and not new_attachments: return [] #", "[] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments)", "self.final_attachments: # sort final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter(", "new_attachments: return [] # no attachments # clean existing attachments for attachment in", "attachments = [] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset)", "list(queryset) add_acl(user, attachments) return attachments def get_new_attachments(self, user, ids): if not ids: return", "have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message % {'attachment': attachment.filename}", "settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT, 'show_value': total_attachments, } )", "= self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments", "class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode':", "file to single post (added %(show_value)s).\", \"You can't attach more than %(limit_value)s flies", "total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more than %(limit_value)s file", "_( \"You don't have permission to remove \\\"%(attachment)s\\\" attachment.\" ) raise serializers.ValidationError( message", "attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data) if total_attachments", "%(show_value)s).\", \"You can't attach more than %(limit_value)s flies to single post (added %(show_value)s).\",", "post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache: del attachment['acl'] del 
attachment['post'] del", "flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % {", "in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message =", "serializers from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from misago.acl", "if attachment.pk in ids: self.final_attachments.append(attachment) else: if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else:", "attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments = len(data)", "raise serializers.ValidationError( message % {'attachment': attachment.filename} ) if new_attachments: self.update_attachments = True self.final_attachments", "id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self, post, attachments): if", "mode, user, post): attachments = [] if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype')", "def save(self): if not self.update_attachments: return if self.removed_attachments: for attachment in self.removed_attachments: attachment.delete_files()", "len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't attach more than", "from rest_framework import serializers from django.utils.translation import ugettext as _ from django.utils.translation import", "self.removed_attachments: attachment.delete_files() self.context['post'].attachment_set.filter( id__in=[a.id for a in self.removed_attachments] ).delete() if self.final_attachments: # sort", "import ungettext from misago.acl import add_acl from misago.conf import settings from misago.threads.serializers import", "validate_attachments_count(data): total_attachments = len(data) if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT: message = ungettext( \"You can't", "\"You can't attach more than %(limit_value)s flies to single post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT,", "if attachments: post.attachments_cache = AttachmentSerializer(attachments, many=True).data for attachment in post.attachments_cache: del attachment['acl'] del", "final attachments by id, descending self.final_attachments.sort(key=lambda a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a", "self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments = self.get_new_attachments(self.context['user'], ids) if not attachments and", "clean existing attachments for attachment in attachments: if attachment.pk in ids: self.final_attachments.append(attachment) else:", "ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post'] ) new_attachments =", "a: a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def", "import settings from misago.threads.serializers import AttachmentSerializer from . 
import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware):", "def get_serializer(self): return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post, }", "if new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True) def", "from . import PostingEndpoint, PostingMiddleware class AttachmentsMiddleware(PostingMiddleware): def use_this_middleware(self): return bool(self.user.acl_cache['max_attachment_size']) def get_serializer(self):", ") if new_attachments: self.update_attachments = True self.final_attachments += new_attachments self.final_attachments.sort(key=lambda a: a.pk, reverse=True)", "self.final_attachments = [] ids = list(set(ids)) validate_attachments_count(ids) attachments = self.get_initial_attachments( self.context['mode'], self.context['user'], self.context['post']", "if mode == PostingEndpoint.EDIT: queryset = post.attachment_set.select_related('filetype') attachments = list(queryset) add_acl(user, attachments) return", "def get_new_attachments(self, user, ids): if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True,", "user, ids): if not ids: return [] queryset = user.attachment_set.select_related('filetype').filter( post__isnull=True, id__in=ids, )", "%(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT, 'show_value': total_attachments, }", "del attachment['acl'] del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data):", "del attachment['post'] del attachment['uploader_ip'] else: post.attachments_cache = None post.update_fields.append('attachments_cache') def validate_attachments_count(data): total_attachments =", "if attachment.acl['can_delete']: self.update_attachments = True self.removed_attachments.append(attachment) else: message = _( \"You don't have", "a.pk, reverse=True) self.context['user'].attachment_set.filter( id__in=[a.id for a in self.final_attachments] ).update(post=self.context['post']) self.sync_attachments_cache(self.context['post'], self.final_attachments) def sync_attachments_cache(self,", "return AttachmentsSerializer( data=self.request.data, context={ 'mode': self.mode, 'user': self.user, 'post': self.post, } ) def", "post (added %(show_value)s).\", settings.MISAGO_POST_ATTACHMENTS_LIMIT, ) raise serializers.ValidationError( message % { 'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT, 'show_value':", "from django.utils.translation import ungettext from misago.acl import add_acl from misago.conf import settings from", "single post (added %(show_value)s).\", \"You can't attach more than %(limit_value)s flies to single" ]
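The record that closes above holds word-level shingles of a single source file, apparently Misago's posting-endpoint attachments middleware (the AttachmentsMiddleware and AttachmentsSerializer classes). Each string is thirteen whitespace-separated tokens of that file; the windows overlap and are stored in no particular order, so the original line breaks and indentation are not recoverable from the strings alone. As a point of reference, a minimal sketch of how such shingles could be generated follows. The window width of 13 and the plain whitespace split are inferred from the records themselves; the function name and the demo snippet are illustrative only, not taken from whatever tool actually produced this data.

def word_shingles(text, width=13):
    # Split on any whitespace. Layout (newlines, indentation) is discarded,
    # which is why the records above cannot reproduce the original formatting.
    tokens = text.split()
    if not tokens:
        return []
    if len(tokens) <= width:
        return [" ".join(tokens)]
    return [" ".join(tokens[i:i + width]) for i in range(len(tokens) - width + 1)]

# Small demonstration with a narrow window so the overlap is visible.
# The sample text is itself taken from the record above.
sample = "if not attachments and not new_attachments: return []"
for gram in word_shingles(sample, width=5):
    print(gram)
# if not attachments and not
# not attachments and not new_attachments:
# attachments and not new_attachments: return
# and not new_attachments: return []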
[ "b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'),", "('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F',", "max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40',", "dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True,", "b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'),", "(b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'), (b'#111111', b'Black')]), ),", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations", "(b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B',", "Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color',", "b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'),", "b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'),", "(b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF',", "= [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF',", "unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'),", "(b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00',", "b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'),", "(b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'), (b'#111111',", "<filename>vida/vida/migrations/0017_form_color.py # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import", "b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'),", "coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class", "null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'),", "import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('vida',", "b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', 
b'White'), (b'#DDDDDD', b'Silver'),", "(b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA',", "b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'),", "-*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies", "choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70',", "(b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD',", "name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970',", "[ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'),", "from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ]", "django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations", "b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'), (b'#111111', b'Black')]),", "(b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE',", "from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies =", "= [ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10,", "b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'),", "model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'),", "__future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [", "migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC',", "b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'),", "class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form',", "'0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'),", "(b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136',", "import models, migrations class 
Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations =", "field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'),", "(b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9',", "(b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B',", "utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration):", "operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'),", "migrations class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField(", "] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9',", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models,", "models, migrations class Migration(migrations.Migration): dependencies = [ ('vida', '0016_auto_20160203_1355'), ] operations = [", "[ ('vida', '0016_auto_20160203_1355'), ] operations = [ migrations.AddField( model_name='form', name='color', field=models.CharField(blank=True, max_length=10, null=True,", "b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'), (b'#111111', b'Black')]), ), ]" ]
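The short record just above can be read back into its source file directly: its first shingle carries the path <filename>vida/vida/migrations/0017_form_color.py, and the remaining shingles overlap enough to cover the whole text. A reconstruction follows for readability. Every token appears in the record itself; only the line breaks and indentation are assumptions, since whitespace splitting discards the original layout.

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('vida', '0016_auto_20160203_1355'),
    ]

    operations = [
        migrations.AddField(
            model_name='form',
            name='color',
            field=models.CharField(blank=True, max_length=10, null=True, choices=[
                (b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'),
                (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'),
                (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'),
                (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'),
                (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'),
                (b'#AAAAAA', b'Gray'), (b'#111111', b'Black')]),
        ),
    ]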
[ "1} output: {log_enabled: no} \"\"\" @pytest.mark.parametrize(\"degree\", [0, 1, 2]) def test_isoline_horizontal(degree): sim =", "= Simulation() sim.input.read_yaml(yaml_string=ISO_INPUT) sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree) sim.input.set_value('mesh/Nx', 10) sim.input.set_value('mesh/Ny', 10) sim.input.set_value( 'initial_conditions/cp/cpp_code', '1.1*pow(pow(x[0] -", "c.vector().get_local() for cell in dolfin.cells(sim.data['mesh']): cell_value = 1 if cell.midpoint().y() < 0.5 else", "Vc.dim()) print(probe.name, probe.field_name, probe.value) print(len(lines)) if sim.ncpu > 1: raise pytest.skip() for x,", "= probe.run(force_active=True) if False: from matplotlib import pyplot c = dolfin.plot(sim.data['c']) pyplot.colorbar(c) for", "0 for dof in dm.cell_dofs(cell.index()): arr[dof] = cell_value c.vector().set_local(arr) c.vector().apply('insert') lines = probe.run(force_active=True)", "== 1 assert x[0] == x[-1] and y[0] == y[-1], \"The loop should", "with sharp interface at x[1] == 0.5 Vc = sim.data['Vc'] c = sim.data['c']", "< 5e-3) # Check that the line is clockwise or counter clockwise #", "interface at x[1] == 0.5 Vc = sim.data['Vc'] c = sim.data['c'] dm =", "1, rho1: 1} output: {log_enabled: no} \"\"\" @pytest.mark.parametrize(\"degree\", [0, 1, 2]) def test_isoline_horizontal(degree):", "sim.data['c'] dm = Vc.dofmap() arr = c.vector().get_local() for cell in dolfin.cells(sim.data['mesh']): cell_value =", "180 / numpy.pi theta[theta < 0] += 360 tdt = numpy.diff(theta) tdt2 =", "print(probe.name, probe.field_name, probe.value) print(len(lines)) if sim.ncpu > 1: raise pytest.skip() for x, y", "in sorted order xdx = numpy.diff(x) assert all(xdx > 0) or all(xdx <", "line is clockwise or counter clockwise # for all segments, no going back", "- 0.5, 2) + pow(x[1] - 0.5, 2), 0.5)' ) setup_simulation(sim) sim.data['c'].assign(sim.data['cp']) probe", "- 0.5 / 1.1) assert all(abs(r - 0.5 / 1.1) < 5e-3) #", "dolfin.cells(sim.data['mesh']): cell_value = 1 if cell.midpoint().y() < 0.5 else 0 for dof in", "tdt2 = tdt[abs(tdt) < 340] print('dt', tdt) assert all(tdt2 > 0) or all(tdt2", "type: IsoSurface value: 0.5 field: c custom_hook: MultiPhaseModelUpdated multiphase_solver: type: BlendedAlgebraicVOF function_space_colour: DG", "import numpy from ocellaris import Simulation, setup_simulation import pytest from helpers import skip_in_parallel", "r - 0.5 / 1.1) assert all(abs(r - 0.5 / 1.1) < 5e-3)", "= dolfin.plot(sim.data['c']) pyplot.colorbar(c) for x, y in lines: pyplot.plot(x, y) pyplot.savefig('test_isoline_circle_%d.png' % degree)", "sim = Simulation() sim.input.read_yaml(yaml_string=ISO_INPUT) sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree) setup_simulation(sim) probe = sim.probes['free_surface'] # Initial value", "sim.probes['free_surface'] lines = probe.run(force_active=True) if False: from matplotlib import pyplot c = dolfin.plot(sim.data['c'])", "no going back and forth theta = numpy.arctan2(y - 0.5, x - 0.5)", "1, rho0: 1, rho1: 1} output: {log_enabled: no} \"\"\" @pytest.mark.parametrize(\"degree\", [0, 1, 2])", "multiphase_solver: type: BlendedAlgebraicVOF function_space_colour: DG polynomial_degree_colour: 0 solver: {type: AnalyticalSolution} boundary_conditions: [{'name': 'all',", "print('x', x, '\\ny', y) assert all(abs(y - 0.5) < 1e-12) # Results should", "# The iso surface code is not written for full parallel support assert", "# SPDX-License-Identifier: Apache-2.0 import dolfin import numpy from ocellaris import 
Simulation, setup_simulation import", "1: # The iso surface code is not written for full parallel support", "= sim.data['c'] dm = Vc.dofmap() arr = c.vector().get_local() for cell in dolfin.cells(sim.data['mesh']): cell_value", "import Simulation, setup_simulation import pytest from helpers import skip_in_parallel ISO_INPUT = \"\"\" ocellaris:", "setup_simulation import pytest from helpers import skip_in_parallel ISO_INPUT = \"\"\" ocellaris: type: input", "0] += 360 tdt = numpy.diff(theta) tdt2 = tdt[abs(tdt) < 340] print('dt', tdt)", "or all(xdx < 0) assert len(lines) == 1 @pytest.mark.parametrize(\"degree\", [1]) def test_isoline_circle(degree): sim", "value: 0.5 field: c custom_hook: MultiPhaseModelUpdated multiphase_solver: type: BlendedAlgebraicVOF function_space_colour: DG polynomial_degree_colour: 0", "0.5 Vc = sim.data['Vc'] c = sim.data['c'] dm = Vc.dofmap() arr = c.vector().get_local()", "BlendedAlgebraicVOF function_space_colour: DG polynomial_degree_colour: 0 solver: {type: AnalyticalSolution} boundary_conditions: [{'name': 'all', 'selector': 'code',", "test_isoline_circle(degree): sim = Simulation() sim.input.read_yaml(yaml_string=ISO_INPUT) sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree) sim.input.set_value('mesh/Nx', 10) sim.input.set_value('mesh/Ny', 10) sim.input.set_value( 'initial_conditions/cp/cpp_code',", "print('y', y) print('dr', r - 0.5 / 1.1) assert all(abs(r - 0.5 /", "< 0.5 else 0 for dof in dm.cell_dofs(cell.index()): arr[dof] = cell_value c.vector().set_local(arr) c.vector().apply('insert')", "probe.value) print(len(lines)) if sim.ncpu > 1: raise pytest.skip() for x, y in lines:", "'inside_code': 'on_boundary'}] physical_properties: {nu0: 1.0, nu1: 1, rho0: 1, rho1: 1} output: {log_enabled:", "0.5 / 1.1) assert all(abs(r - 0.5 / 1.1) < 5e-3) # Check", "x, '\\ny', y) assert all(abs(y - 0.5) < 1e-12) # Results should be", "print(len(lines)) if sim.ncpu > 1: raise pytest.skip() for x, y in lines: print('x',", "False: from matplotlib import pyplot c = dolfin.plot(sim.data['c']) pyplot.colorbar(c) for x, y in", "probe.value) print(len(lines)) for x, y in lines: # Check that the radius is", "if sim.ncpu == 1: # The iso surface code is not written for", "probe = sim.probes['free_surface'] # Initial value with sharp interface at x[1] == 0.5", "2) + pow(x[1] - 0.5, 2), 0.5)' ) setup_simulation(sim) sim.data['c'].assign(sim.data['cp']) probe = sim.probes['free_surface']", "x - 0.5) * 180 / numpy.pi theta[theta < 0] += 360 tdt", "assert x[0] == x[-1] and y[0] == y[-1], \"The loop should be closed\"", "** 2) ** 0.5 print('x', x) print('y', y) print('dr', r - 0.5 /", "for x, y in lines: # Check that the radius is constant r", "** 2 + (y - 0.5) ** 2) ** 0.5 print('x', x) print('y',", "/ 1.1) < 5e-3) # Check that the line is clockwise or counter", "0.5, 2), 0.5)' ) setup_simulation(sim) sim.data['c'].assign(sim.data['cp']) probe = sim.probes['free_surface'] lines = probe.run(force_active=True) if", "nu1: 1, rho0: 1, rho1: 1} output: {log_enabled: no} \"\"\" @pytest.mark.parametrize(\"degree\", [0, 1,", "assert all(abs(y - 0.5) < 1e-12) # Results should be in sorted order", "< 1e-12) # Results should be in sorted order xdx = numpy.diff(x) assert", "c custom_hook: MultiPhaseModelUpdated multiphase_solver: type: BlendedAlgebraicVOF function_space_colour: DG polynomial_degree_colour: 0 solver: {type: AnalyticalSolution}", "c.vector().apply('insert') lines = probe.run(force_active=True) print('\\nDegree:', degree, 'Vcdim:', Vc.dim()) print(probe.name, 
probe.field_name, probe.value) print(len(lines)) if" ]

# Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0

import dolfin
import numpy
from ocellaris import Simulation, setup_simulation
import pytest
from helpers import skip_in_parallel

ISO_INPUT = """
ocellaris:
    type: input
    version: 1.0
mesh:
    type: Rectangle
    Nx: 4
    Ny: 4
probes:
    - name: free_surface
      enabled: yes
      type: IsoSurface
      value: 0.5
      field: c
      custom_hook: MultiPhaseModelUpdated
multiphase_solver:
    type: BlendedAlgebraicVOF
    function_space_colour: DG
    polynomial_degree_colour: 0
solver: {type: AnalyticalSolution}
boundary_conditions: [{'name': 'all', 'selector': 'code', 'inside_code': 'on_boundary'}]
physical_properties: {nu0: 1.0, nu1: 1, rho0: 1, rho1: 1}
output: {log_enabled: no}
"""


@pytest.mark.parametrize("degree", [0, 1, 2])
def test_isoline_horizontal(degree):
    sim = Simulation()
    sim.input.read_yaml(yaml_string=ISO_INPUT)
    sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
    setup_simulation(sim)
    probe = sim.probes['free_surface']

    # Initial value with sharp interface at x[1] == 0.5
    Vc = sim.data['Vc']
    c = sim.data['c']
    dm = Vc.dofmap()
    arr = c.vector().get_local()
    for cell in dolfin.cells(sim.data['mesh']):
        cell_value = 1 if cell.midpoint().y() < 0.5 else 0
        for dof in dm.cell_dofs(cell.index()):
            arr[dof] = cell_value
    c.vector().set_local(arr)
    c.vector().apply('insert')

    lines = probe.run(force_active=True)

    print('\nDegree:', degree, 'Vcdim:', Vc.dim())
    print(probe.name, probe.field_name, probe.value)
    print(len(lines))

    if sim.ncpu > 1:
        raise pytest.skip()

    for x, y in lines:
        print('x', x, '\ny', y)
        assert all(abs(y - 0.5) < 1e-12)
        # Results should be in sorted order
        xdx = numpy.diff(x)
        assert all(xdx > 0) or all(xdx < 0)
    assert len(lines) == 1


@pytest.mark.parametrize("degree", [1])
def test_isoline_circle(degree):
    sim = Simulation()
    sim.input.read_yaml(yaml_string=ISO_INPUT)
    sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
    sim.input.set_value('mesh/Nx', 10)
    sim.input.set_value('mesh/Ny', 10)
    sim.input.set_value(
        'initial_conditions/cp/cpp_code',
        '1.1*pow(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2), 0.5)'
    )
    setup_simulation(sim)
    sim.data['c'].assign(sim.data['cp'])
    probe = sim.probes['free_surface']
    lines = probe.run(force_active=True)

    if False:
        from matplotlib import pyplot

        c = dolfin.plot(sim.data['c'])
        pyplot.colorbar(c)
        for x, y in lines:
            pyplot.plot(x, y)
        pyplot.savefig('test_isoline_circle_%d.png' % degree)
        pyplot.close()

    print(probe.name, probe.field_name, probe.value)
    print(len(lines))

    for x, y in lines:
        # Check that the radius is constant
        r = ((x - 0.5) ** 2 + (y - 0.5) ** 2) ** 0.5
        print('x', x)
        print('y', y)
        print('dr', r - 0.5 / 1.1)
        assert all(abs(r - 0.5 / 1.1) < 5e-3)

        # Check that the line is clockwise or counter clockwise
        # for all segments, no going back and forth
        theta = numpy.arctan2(y - 0.5, x - 0.5) * 180 / numpy.pi
        theta[theta < 0] += 360
        tdt = numpy.diff(theta)
        tdt2 = tdt[abs(tdt) < 340]
        print('dt', tdt)
        assert all(tdt2 > 0) or all(tdt2 < 0)

    if sim.ncpu == 1:
        # The iso surface code is not written for full parallel support
        assert len(lines) == 1
        assert x[0] == x[-1] and y[0] == y[-1], "The loop should be closed"
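# Standalone illustration (added here, not part of the test file above; the
# helper name check_ordering and the synthetic data are made up for the
# example): the same "monotonic angle" ordering check that test_isoline_circle
# applies to the probe output, run on a synthetic circular polyline.
import numpy


def check_ordering(x, y, cx=0.5, cy=0.5):
    # Angle of each vertex around the centre, mapped to 0..360 degrees
    theta = numpy.arctan2(y - cy, x - cx) * 180 / numpy.pi
    theta[theta < 0] += 360
    tdt = numpy.diff(theta)
    # Ignore the single wrap-around jump (close to 360 degrees) where the loop closes
    tdt2 = tdt[abs(tdt) < 340]
    return all(tdt2 > 0) or all(tdt2 < 0)


angles = numpy.linspace(0.0, 2 * numpy.pi, 32, endpoint=False)
x = 0.5 + 0.3 * numpy.cos(angles)
y = 0.5 + 0.3 * numpy.sin(angles)
assert check_ordering(x, y)                                  # ordered loop passes
assert not check_ordering(x[[0, 2, 1, 3]], y[[0, 2, 1, 3]])  # shuffled points fail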
[ "to determine significance def isSignificant(xval,yval, xthr = 1, ythr = 2): if abs(xval)", "import pandas as pd import numpy as np import matplotlib.pyplot as plt import", "# Function to determine significance def isSignificant(xval,yval, xthr = 1, ythr = 2):", "csv_files = [f for f in csv_files if \"Line\" in f and \".csv\"", "for f in csv_files if \"Line\" in f and \".csv\" in f] #", "abs(yval) >= ythr: return True else: return False # Read Entrez -> Name", "csv_files if \"Line\" in f and \".csv\" in f] # Function to determine", "= 1, ythr = 2): if abs(xval) >= xthr and abs(yval) >= ythr:", "# Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in", "[f for f in csv_files if \"Line\" in f and \".csv\" in f]", "True else: return False # Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\",", "pandas as pd import numpy as np import matplotlib.pyplot as plt import os", "csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x", "df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id", "as np import matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd()) csv_files =", "= 2): if abs(xval) >= xthr and abs(yval) >= ythr: return True else:", "= df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for", "df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values + 1e-5 y = -np.log10(y)", "import os csv_files = os.listdir(os.getcwd()) csv_files = [f for f in csv_files if", ">= ythr: return True else: return False # Read Entrez -> Name map", "= [i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i", "alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x),", "significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName']", "\".csv\" in f] # Function to determine significance def isSignificant(xval,yval, xthr = 1,", "abs(xval) >= xthr and abs(yval) >= ythr: return True else: return False #", "y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2,", "tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id in list(final_df['geneid'])] # Required", "names of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df,", "= df['padj'].values + 1e-5 y = -np.log10(y) significant_idx = [i for i in", "f and \".csv\" in f] # Function to determine significance def isSignificant(xval,yval, xthr", "for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df =", "[\"cge:\" + str(id) for id in list(final_df['geneid'])] # Required for pathway analysis with", "= os.listdir(os.getcwd()) csv_files = [f for f in csv_files if \"Line\" in f", "range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x)) if not isSignificant(x[i],y[i])]", "Volcano Plot 
plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant')", "str(id) for id in list(final_df['geneid'])] # Required for pathway analysis with ROntoTools final_df.to_csv(csv_file.replace(\".csv\",\"_SignificantGenes.csv\"),", "label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed')", "if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant')", "plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df", "differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] =", "pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values +", "0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)')", "= pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file,", "xthr and abs(yval) >= ythr: return True else: return False # Read Entrez", "i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx],", "Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed", "in csv_files if \"Line\" in f and \".csv\" in f] # Function to", "plt import os csv_files = os.listdir(os.getcwd()) csv_files = [f for f in csv_files", "csv_files = os.listdir(os.getcwd()) csv_files = [f for f in csv_files if \"Line\" in", "numpy as np import matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd()) csv_files", "pd import numpy as np import matplotlib.pyplot as plt import os csv_files =", "if \"Line\" in f and \".csv\" in f] # Function to determine significance", "x = df['log2FoldChange'].values y = df['padj'].values + 1e-5 y = -np.log10(y) significant_idx =", "final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id in", "as pd import numpy as np import matplotlib.pyplot as plt import os csv_files", "pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id in list(final_df['geneid'])] #", "if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x)) if not isSignificant(x[i],y[i])] #", "def isSignificant(xval,yval, xthr = 1, ythr = 2): if abs(xval) >= xthr and", "(adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed genes tmp_df", "not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx],", "isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], 
y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx],", "+ str(id) for id in list(final_df['geneid'])] # Required for pathway analysis with ROntoTools", "file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values", "header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df", "os.listdir(os.getcwd()) csv_files = [f for f in csv_files if \"Line\" in f and", "= pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id in list(final_df['geneid'])]", "Function to determine significance def isSignificant(xval,yval, xthr = 1, ythr = 2): if", "\"Line\" in f and \".csv\" in f] # Function to determine significance def", "import numpy as np import matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd())", "Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1,", "c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x),", "0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold", "of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\")", "ythr: return True else: return False # Read Entrez -> Name map entrezToName", "isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot", "nonsignificant_idx = [i for i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano", "in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"})", "# Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue',", "df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y =", "alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0,", "significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for", "pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0)", "plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) #", "0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values + 1e-5 y = -np.log10(y) significant_idx", "# Save names of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df =", "os csv_files = os.listdir(os.getcwd()) csv_files = [f for f in csv_files if \"Line\"", "ythr = 2): if abs(xval) >= xthr and abs(yval) >= ythr: return True", 
"plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant", "2): if abs(xval) >= xthr and abs(yval) >= ythr: return True else: return", "range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35,", "y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed')", "Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35,", "isSignificant(xval,yval, xthr = 1, ythr = 2): if abs(xval) >= xthr and abs(yval)", "label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5,", "for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x))", "[i for i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8))", "1e-5 y = -np.log10(y) significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])]", "return False # Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for", "Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially", "False # Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file", "plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2", "df['padj'].values + 1e-5 y = -np.log10(y) significant_idx = [i for i in range(len(x))", "c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1,", "in f and \".csv\" in f] # Function to determine significance def isSignificant(xval,yval,", "matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd()) csv_files = [f for f", "= df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values + 1e-5 y =", "= -np.log10(y) significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx =", "Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file))", "map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df", "max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names", "for id in list(final_df['geneid'])] # Required for pathway analysis with ROntoTools final_df.to_csv(csv_file.replace(\".csv\",\"_SignificantGenes.csv\"), index=False)", "in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red',", "= [f for f in csv_files if \"Line\" in f and 
\".csv\" in", "min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save", "else: return False # Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0)", "Save names of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName,", "genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" +", "for i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx],", "= [i for i in range(len(x)) if not isSignificant(x[i],y[i])] # Plot Volcano Plot", "<filename>DifferentialExpression/05_Volcano_Plots.py import pandas as pd import numpy as np import matplotlib.pyplot as plt", "plt.figure(figsize=(8,8)) plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0,", "in f] # Function to determine significance def isSignificant(xval,yval, xthr = 1, ythr", "df['log2FoldChange'].values y = df['padj'].values + 1e-5 y = -np.log10(y) significant_idx = [i for", "linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\"))", "= pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values", "[i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in", "plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed')", "entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df =", "= df['log2FoldChange'].values y = df['padj'].values + 1e-5 y = -np.log10(y) significant_idx = [i", "p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed genes tmp_df =", "and \".csv\" in f] # Function to determine significance def isSignificant(xval,yval, xthr =", "{}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y", "in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x)) if not", "-np.log10(y) significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i", "= [\"cge:\" + str(id) for id in list(final_df['geneid'])] # Required for pathway analysis", "and abs(yval) >= ythr: return True else: return False # Read Entrez ->", "+ 1e-5 y = -np.log10(y) significant_idx = [i for i in range(len(x)) if", "plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True)", "xthr = 1, ythr = 2): if abs(xval) >= xthr and abs(yval) >=", "df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values + 1e-5 y", "np import 
matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd()) csv_files = [f", "y = -np.log10(y) significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx", "i in range(len(x)) if isSignificant(x[i],y[i])] nonsignificant_idx = [i for i in range(len(x)) if", "y = df['padj'].values + 1e-5 y = -np.log10(y) significant_idx = [i for i", "Read Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files:", "as plt import os csv_files = os.listdir(os.getcwd()) csv_files = [f for f in", "f in csv_files if \"Line\" in f and \".csv\" in f] # Function", "determine significance def isSignificant(xval,yval, xthr = 1, ythr = 2): if abs(xval) >=", "csv_file in csv_files: print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed:", "import matplotlib.pyplot as plt import os csv_files = os.listdir(os.getcwd()) csv_files = [f for", "5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend()", "if abs(xval) >= xthr and abs(yval) >= ythr: return True else: return False", "significance def isSignificant(xval,yval, xthr = 1, ythr = 2): if abs(xval) >= xthr", "expressed genes tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\"", "on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id) for id in list(final_df['geneid'])] # Required for", "5, linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change')", "1, ythr = 2): if abs(xval) >= xthr and abs(yval) >= ythr: return", "return True else: return False # Read Entrez -> Name map entrezToName =", "Entrez -> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing", "print(\"Processing file {}\".format(csv_file)) df = pd.read_csv(csv_file, header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x =", "f] # Function to determine significance def isSignificant(xval,yval, xthr = 1, ythr =", "linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of", "plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant') plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant') plt.vlines(-1, 0, 5,", ">= xthr and abs(yval) >= ythr: return True else: return False # Read", "plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') plt.ylabel('-log10 (adjusted", "-> Name map entrezToName = pd.read_csv(\"EntrezToNameMap.csv\", header=0) for csv_file in csv_files: print(\"Processing file", "header=0) df = df.rename(columns={\"Unnamed: 0\":\"gename\"}) x = df['log2FoldChange'].values y = df['padj'].values + 1e-5", "tmp_df = df.iloc[significant_idx,:].reset_index(drop=True) final_df = pd.merge(entrezToName, tmp_df, on=\"gename\") final_df['keggGeneName'] = [\"cge:\" + str(id)", "final_df['keggGeneName'] = [\"cge:\" + str(id) for id in list(final_df['geneid'])] # Required for pathway", "linestyles='dashed') plt.vlines(1, 0, 5, linestyles='dashed') plt.hlines(2, min(x), max(x), linestyles='dashed') plt.xlabel('Log2 Fold Change') 
plt.ylabel('-log10", "plt.ylabel('-log10 (adjusted p-value)') plt.legend() plt.savefig(csv_file.replace(\".csv\",\"_volcanoPlot.pdf\")) # Save names of significant differentially expressed genes" ]
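# Note on the thresholds used by isSignificant above (added clarification, not
# from the original script): xthr = 1 on the log2 fold change axis means the
# absolute fold change is at least 2, and ythr = 2 on the -log10(adjusted p)
# axis corresponds to padj <= 0.01 (approximately, since 1e-5 is added to padj
# before taking the logarithm to avoid log(0)).
import numpy as np

assert abs(-np.log10(0.01) - 2.0) < 1e-12                    # padj = 0.01  ->  y = 2
assert abs(np.log2(2.0)) >= 1 and abs(np.log2(0.5)) >= 1     # 2-fold up or down  ->  |x| >= 1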
[ "Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name',", "type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male", "= rules_info if club.rules: for rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry)", "= GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout", "for cls in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as", "label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering')", "label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__", "Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount',", "if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str", "club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name',", "'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))}", "Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria',", "with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10,", "club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members']", "is None: return sim_info_manager = services.sim_info_manager() club_info = [] for club in club_service.clubs:", "GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING)", "= criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria': str(criteria)} criteria_info.append(criteria_entry)", "GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', 
type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location',", "with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as", "GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club", "label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema:", "ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema:", "club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color',", "'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for", "cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe", "if club_service is None: return sim_info_manager = services.sim_info_manager() club_info = [] for club", "type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded:", "label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id')", "= {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id':", "cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID',", "'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for sim in club.members: group_members_entry", "club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return", "(1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount',", "{}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str,", 
"sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID',", "str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)),", "Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id')", "cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data')", "club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else:", "Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if", "width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule',", "= services.sim_info_manager() club_info = [] for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE:", "[cls.__name__ for cls in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club')", "[{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] =", "label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service is None: return", "label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat:", "as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with", "with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership", "str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info", "label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35)", "[] for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue))", "cheat: cheat.add_token_param('club_id') def 
get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club", "as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema:", "club_service = services.get_club_service() if club_service is None: return sim_info_manager = services.sim_info_manager() club_info =", "Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members')", "club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager", "Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id')", "Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id')", "club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)),", "{}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None'", "services.get_club_service() if club_service is None: return sim_info_manager = services.sim_info_manager() club_info = [] for", "cls in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat:", "clubs.club_enums import ClubHangoutSetting from sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import", "sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name',", "club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as", "Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as", "club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult", "with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') 
@GsiHandler('club_info', club_schema) def generate_club_info_data():", "criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria': str(criteria)} criteria_info.append(criteria_entry) club_info.append(entry)", "unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform',", "type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child", "'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] =", "club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting", "import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema =", "sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info',", "Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult',", "label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def", "in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed',", "label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader',", "services.sim_info_manager() club_info = [] for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str", "Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with", "== ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name':", "'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id", "= services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return [] def", "def generate_all_club_seeds(): 
instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()]", "else: club_hangout_str = 'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color':", "if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria': str(criteria)} criteria_info.append(criteria_entry) club_info.append(entry) return", "str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)),", "Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def", "club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT:", "cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start", "members_info for sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim", "cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering')", "with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim", "= [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry =", "sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service is", "club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str =", "club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat:", "Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id',", "with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim", "generate_club_info_data(): club_service = services.get_club_service() if club_service is None: return sim_info_manager = services.sim_info_manager() club_info", "label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with 
club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema:", "Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult',", "GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id',", "club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info", "if club.rules: for rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info =", "as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with", "str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for sim in club.members: group_members_entry =", "cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove", "= {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None',", "str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))}", "= [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules']", "sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema", "from clubs.club_enums import ClubHangoutSetting from sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers", "import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING,", "str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name':", "in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting ==", "for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if club.rules: for", "def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', 
label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id',", "in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if club.rules: for rule in", "return [cls.__name__ for cls in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create", "label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100, 1000)", "label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service()", "for rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria']", "add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove", "= {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for", "sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with", "club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club", "entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria': str(criteria)}", "club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry =", "GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service =", "sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules')", "with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End", "label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING)", "as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id')", "club_schema.add_field('uniform_female_child', label='Female Child Uniform', 
type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child", "Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if", "GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4)", "@GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service is None: return sim_info_manager", "sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for", "type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds():", "club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat:", "label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child',", "entry['club_members'] = members_info for sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name,", "width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema,", "str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in", "1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with", "instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return []", "[] entry['club_members'] = members_info for sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name':", "= 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name': str(club), 'club_id': str(club.club_id),", "Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING)", "Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for", 
"club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service", "from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club Info')", "group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] =", "generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return", "with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club')", "import services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club", "Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100, 1000) with", "as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add", "with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members',", "sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club", "club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria')", "= [] entry['club_members'] = members_info for sim in club.members: group_members_entry = {'sim_id': str(sim.id),", "import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name',", "if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return [] def add_club(manager): with", "width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim", "{'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id),", "Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED)", "in club.rules: rules_entry = {'rule': str(rule)} 
rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info", "ClubHangoutSetting from sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import", "club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING)", "club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform',", "club_hangout_str = 'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color)", "as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader')", "label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with", "= members_info for sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader':", "else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = []", "= 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str", "Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria',", "club_info = [] for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str =", "type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in", "cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with", "Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def", "club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria': str(criteria)} criteria_info.append(criteria_entry) club_info.append(entry) return club_info", "def get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as", "sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)}", "ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == 
ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id)", "cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return", "100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id')", "sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if club.rules: for rule", "str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for sim in club.members:", "def generate_club_info_data(): club_service = services.get_club_service() if club_service is None: return sim_info_manager = services.sim_info_manager()", "str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if club.rules:", "cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id',", "Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls", "as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts():", "return sim_info_manager = services.sim_info_manager() club_info = [] for club in club_service.clubs: if club.hangout_setting", "club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service is None: return sim_info_manager =", "cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim", "services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From", "Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules',", "{'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child':", "label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as", "label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform',", "str(rule)} 
rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in", "cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name',", "dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35)", "club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING)", "ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male", "dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim", "sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True)", "club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as", "sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service is None:", "as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema,", "return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club)", "rules_info if club.rules: for rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info", "with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club')", "= services.get_club_service() if club_service is None: return sim_info_manager = services.sim_info_manager() club_info = []", "'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': 
str(bool(club.uniform_male_adult)), 'uniform_female_adult':", "entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else", "cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id',", "rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in club.membership_criteria:", "== ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone:", "in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)} members_info.append(group_members_entry)", "'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for sim in", "elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry", "Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club',", "cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id',", "[] entry['club_rules'] = rules_info if club.rules: for rule in club.rules: rules_entry = {'rule':", "instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds)", "'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child':", "ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent", "= [] for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue:", "[] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry = {'criteria':", "entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = []", "services import sims4.resources club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID',", "cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', 
dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id',", "import ClubHangoutSetting from sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services", "from sims4.gsi.dispatcher import GsiHandler from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers import services import sims4.resources", "From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat:", "type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color',", "sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema)", "GsiGridSchema, label='Recent Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4)", "club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info =", "label='Remove Club') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat:", "Sim From Club') as cheat: cheat.add_token_param('sim_id') cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as", "label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child',", "label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with", "'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color", "= 'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if", "members_info = [] entry['club_members'] = members_info for sim in club.members: group_members_entry = {'sim_id':", "if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info", "label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform',", 
"criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria in club.membership_criteria: criteria_entry", "'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if", "rules_info = [] entry['club_rules'] = rules_info if club.rules: for rule in club.rules: rules_entry", "with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as", "label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema,", "club.rules: for rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = []", "instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return [] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed',", "for club in club_service.clubs: if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE: club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue)) elif", "cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks')", "str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info for sim", "entry['club_rules'] = rules_info if club.rules: for rule in club.rules: rules_entry = {'rule': str(rule)}", "Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING)", "services.get_instance_manager(sims4.resources.Types.CLUB_SEED) if instance_manager.all_instances_loaded: return [cls.__name__ for cls in instance_manager.types.values()] return [] def add_club(manager):", "Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING)", "10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts)", "as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh", "club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if", "club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name': str(club), 'club_id':", "club_service is None: return sim_info_manager = services.sim_info_manager() club_info = [] for club in", "None: return sim_info_manager = services.sim_info_manager() club_info = [] 
for club in club_service.clubs: if", "label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema, label='Club", "sim_info_manager = services.sim_info_manager() club_info = [] for club in club_service.clubs: if club.hangout_setting ==", "str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members'] = members_info", "'Venue: {}'.format(str(club.hangout_venue)) elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str =", "= [] entry['club_rules'] = rules_info if club.rules: for rule in club.rules: rules_entry =", "'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult':", "'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout':", "'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))} members_info = [] entry['club_members']", "club_schema = GsiGridSchema(label='Club Info') club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout',", "for sim in club.members: group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is", "rule in club.rules: rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] =", "sub_schema.add_field('sim_name', label='Sim Name', width=0.4) with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule')", "is club.leader)} members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids]", "sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members',", "get_buck_amounts(): return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat:", "label='End Club Gathering') as cheat: cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat: cheat.add_token_param('club_id')", "Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female", "club_schema.add_view_cheat('bucks.update_bucks_by_amount', 
label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks') cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts) cheat.add_token_param('club_id') with club_schema.add_has_many('club_members', GsiGridSchema,", "sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as", "type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female", "label='Male Adult Uniform', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_female_adult', label='Female Child Uniform', type=GsiFieldVisualizers.STRING) def generate_all_club_seeds(): instance_manager =", "club._recent_member_ids] rules_info = [] entry['club_rules'] = rules_info if club.rules: for rule in club.rules:", "return (1, 10, 100, 1000) with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat: cheat.add_static_param('ClubBucks')", "rules_entry = {'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria:", "as sub_schema: sub_schema.add_field('criteria', label='Criteria') @GsiHandler('club_info', club_schema) def generate_club_info_data(): club_service = services.get_club_service() if club_service", "cheat.add_token_param('club_id') with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1,", "GsiGridSchema, label='Club Rules') as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as", "members_info.append(group_members_entry) entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids] rules_info =", "as sub_schema: sub_schema.add_field('rule', label='Rule') with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema: sub_schema.add_field('criteria', label='Criteria')", "ClubHangoutSetting.HANGOUT_LOT: club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id) else: club_hangout_str = 'None' entry = {'name': str(club),", "{'rule': str(rule)} rules_info.append(rules_entry) criteria_info = [] entry['membership_criteria'] = criteria_info if club.membership_criteria: for criteria", "club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat: cheat.add_token_param('club_id') def get_buck_amounts(): return (1, 10, 100,", "club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name',", "type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING) club_schema.add_field('uniform_male_child', label='Male Child", "label='Name', type=GsiFieldVisualizers.STRING) club_schema.add_field('club_id', 
label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True) club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING) club_schema.add_field('associated_color', label='Associated", "[] def add_club(manager): with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat: cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds) services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club) with", "width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is Leader') with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members')", "Members') as sub_schema: sub_schema.add_field('sim_id', label='Sim ID', width=0.35) sub_schema.add_field('sim_name', label='Sim Name', width=0.4) sub_schema.add_field('is_leader', label='Is" ]
[ "RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE:", "from httputils import error class RpcError(Exception): def __init__(self, id, message, code): self._message =", "value): self._id = value @property def message(self): return self._message @message.setter def message(self, value):", "value): self._code = value @property def id(self): return self._id @id.setter def id(self, value):", "RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return", "message(self, value): self._message = value def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def", "@property def message(self): return self._message @message.setter def message(self, value): self._message = value def", "def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message,", "= value @property def id(self): return self._id @id.setter def id(self, value): self._id =", "parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE", "super().__init__(self.message) @property def code(self): return self._code @code.setter def code(self, value): self._code = value", "code self._id = id super().__init__(self.message) @property def code(self): return self._code @code.setter def code(self,", "def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message,", "def encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE: o.code,", "self._message = value def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return", "self._id = value @property def message(self): return self._message @message.setter def message(self, value): self._message", ") def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id,", "from .constants import * from json import JSONEncoder from httputils import error class", "@property def id(self): return self._id @id.setter def id(self, value): self._id = value @property", "message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message):", "@message.setter def message(self, value): self._message = value def parseToHttpError(self): return error.Error( message=self.message, code=self.code", "message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message):", "code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__(", "return self._id @id.setter def id(self, value): self._id = value @property def message(self): return", "self._message @message.setter def message(self, value): self._message = value def parseToHttpError(self): return 
error.Error( message=self.message,", "* from json import JSONEncoder from httputils import error class RpcError(Exception): def __init__(self,", "= message self._code = code self._id = id super().__init__(self.message) @property def code(self): return", ") def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID:", "def __init__(self, id, message, code): self._message = message self._code = code self._id =", "@id.setter def id(self, value): self._id = value @property def message(self): return self._message @message.setter", "message, code): self._message = message self._code = code self._id = id super().__init__(self.message) @property", "super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self,", "return self._message @message.setter def message(self, value): self._message = value def parseToHttpError(self): return error.Error(", "value @property def id(self): return self._id @id.setter def id(self, value): self._id = value", "return self._code @code.setter def code(self, value): self._code = value @property def id(self): return", "def message(self, value): self._message = value def parseToHttpError(self): return error.Error( message=self.message, code=self.code )", "<gh_stars>0 #!/usr/bin/python from .constants import * from json import JSONEncoder from httputils import", "value @property def message(self): return self._message @message.setter def message(self, value): self._message = value", "id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id,", "import * from json import JSONEncoder from httputils import error class RpcError(Exception): def", "message self._code = code self._id = id super().__init__(self.message) @property def code(self): return self._code", "id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError):", "RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return", "self._code @code.setter def code(self, value): self._code = value @property def id(self): return self._id", "class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self):", "message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message):", ") def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id,", "super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self,", "= id super().__init__(self.message) @property def code(self): return self._code @code.setter def code(self, value): self._code", "from json import JSONEncoder from httputils import error class RpcError(Exception): def __init__(self, id,", 
"def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message)", ".constants import * from json import JSONEncoder from httputils import error class RpcError(Exception):", "error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR:", "RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return", ") def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id,", "= value def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self)", "json import JSONEncoder from httputils import error class RpcError(Exception): def __init__(self, id, message,", "value def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class", "__init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class", "super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self,", "code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__(", "parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE", "RpcError(Exception): def __init__(self, id, message, code): self._message = message self._code = code self._id", "class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self):", "id(self, value): self._id = value @property def message(self): return self._message @message.setter def message(self,", "self._code = value @property def id(self): return self._id @id.setter def id(self, value): self._id", "self._id = id super().__init__(self.message) @property def code(self): return self._code @code.setter def code(self, value):", "def id(self, value): self._id = value @property def message(self): return self._message @message.setter def", "RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def", "httputils import error class RpcError(Exception): def __init__(self, id, message, code): self._message = message", "message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def", "return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE )", "id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return 
error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o):", "jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE", "id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder):", "encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE: o.code, MESSAGE:", "def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message)", "id(self): return self._id @id.setter def id(self, value): self._id = value @property def message(self):", "return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self,", "error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id,", "JSONEncoder from httputils import error class RpcError(Exception): def __init__(self, id, message, code): self._message", "message(self): return self._message @message.setter def message(self, value): self._message = value def parseToHttpError(self): return", "id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError):", "return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE )", "def code(self, value): self._code = value @property def id(self): return self._id @id.setter def", "o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE: o.code, MESSAGE: o.message", "id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id,", "class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self):", "__init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class", "id super().__init__(self.message) @property def code(self): return self._code @code.setter def code(self, value): self._code =", "def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message)", "RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return", "message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def", "__init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class", "return { 
ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE: o.code, MESSAGE: o.message }", "def code(self): return self._code @code.setter def code(self, value): self._code = value @property def", "def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message,", "message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def", "code(self): return self._code @code.setter def code(self, value): self._code = value @property def id(self):", "message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message):", "id, message, code): self._message = message self._code = code self._id = id super().__init__(self.message)", "code=METHOD_NOT_ALLOWED_CODE ) def parseToHttpError(self): return error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__(", "value): self._message = value def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self):", "error.MethodNotAllowedError(message=self.message) class RpcInternalServerError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def", "self._message = message self._code = code self._id = id super().__init__(self.message) @property def code(self):", "id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id,", "parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def", "class RpcError(Exception): def __init__(self, id, message, code): self._message = message self._code = code", "self._code = code self._id = id super().__init__(self.message) @property def code(self): return self._code @code.setter", "id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError):", "return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION,", "code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__(", "def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id,", "message): super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def", "self._id @id.setter def id(self, value): self._id = value @property def message(self): return self._message", ") def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id,", "return 
error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE )", "class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self):", "__init__(self, id, message, code): self._message = message self._code = code self._id = id", "parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id, JSON_RPC:", "super().__init__( id=id, message=message, code=INTERNAL_SERVER_ERROR_CODE ) def parseToHttpError(self): return error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self,", "@code.setter def code(self, value): self._code = value @property def id(self): return self._id @id.setter", "def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message,", "def message(self): return self._message @message.setter def message(self, value): self._message = value def parseToHttpError(self):", "def __init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message)", "__init__(self, id, message): super().__init__( id=id, message=message, code=BAD_REQUEST_CODE ) def parseToHttpError(self): return error.BadRequestError(message=self.message) class", "parseToHttpError(self): return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE", "class RpcErrorEncoder(JSONEncoder): def encode(self, o): return { ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: {", "import JSONEncoder from httputils import error class RpcError(Exception): def __init__(self, id, message, code):", "def id(self): return self._id @id.setter def id(self, value): self._id = value @property def", "= value @property def message(self): return self._message @message.setter def message(self, value): self._message =", "def parseToHttpError(self): return error.Error( message=self.message, code=self.code ) def jsonEncode(self): return RpcErrorEncoder().encode(self) class RpcBadRequestError(RpcError):", "code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return {", "return error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE )", "import error class RpcError(Exception): def __init__(self, id, message, code): self._message = message self._code", "code(self, value): self._code = value @property def id(self): return self._id @id.setter def id(self,", "error class RpcError(Exception): def __init__(self, id, message, code): self._message = message self._code =", "error.InternalServerError(message=self.message) class RpcNotFoundError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=NOT_FOUND_CODE ) def", "= code self._id = id super().__init__(self.message) @property def code(self): return self._code @code.setter def", "{ ID: o.id, JSON_RPC: JSON_RPC_VERSION, ERROR: { CODE: o.code, MESSAGE: o.message } }", 
"#!/usr/bin/python from .constants import * from json import JSONEncoder from httputils import error", "message=message, code=NOT_FOUND_CODE ) def parseToHttpError(self): return error.NotFoundError(message=self.message) class RpcErrorEncoder(JSONEncoder): def encode(self, o): return", "code): self._message = message self._code = code self._id = id super().__init__(self.message) @property def", "@property def code(self): return self._code @code.setter def code(self, value): self._code = value @property", "error.BadRequestError(message=self.message) class RpcMethodNotAllowedError(RpcError): def __init__(self, id, message): super().__init__( id=id, message=message, code=METHOD_NOT_ALLOWED_CODE ) def" ]
[ "cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns = [\"property_type\"]) print(len(cur_data)) cur_data =", "'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ] firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data =", "cur_data.longitude, cur_data.latitude ) ) cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data))", "\"Brooklyn\" cur_data = cur_data.drop(columns = [\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data))", "] cur_data = cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) replaced_columns = [ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms',", "cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on=\"id\", how=\"right\") cur_data = geopandas.GeoDataFrame( cur_data, geometry=geopandas.points_from_xy( cur_data.longitude, cur_data.latitude", "'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ] firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data", "print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0]) cur_data[\"zipcode\"]", "[ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\"", "= [\"property_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = cur_data.drop(columns =", "] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] =", "cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) replaced_columns = [ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'number_of_reviews',", "cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data =", "import geopandas def compile_airbnb_data(cur_link_table): cur_tables = [] for cur_row in cur_link_table.itertuples(): tmp_table =", "cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) cur_data = cur_data[cur_data.room_type == \"Entire home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"])", "cur_selector = cur_selector[ cur_selector > 3 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) replaced_columns =", "cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data =", "= cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0]) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].astype(\"int\")", "import pandas as pd import geopandas def compile_airbnb_data(cur_link_table): cur_tables 
= [] for cur_row", "= cur_data.groupby(\"id\")[\"zipcode\"].nunique() cur_selector = cur_selector[ cur_selector == 1 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data))", "[] for cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] =", "= cur_row.year tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table) cur_data = pd.concat(cur_tables) cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"],", "cur_data[ cur_data.beds > 0 ] cur_data = cur_data[ cur_data.beds < 7 ] print(len(cur_data))", "= cur_data.drop(columns=[\"number_of_reviews\"]) print(len(cur_data)) cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"])", "geometry=geopandas.points_from_xy( cur_data.longitude, cur_data.latitude ) ) cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"])", "cur_data.drop(columns=[\"number_of_reviews\"]) print(len(cur_data)) cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data))", "tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table)", "= cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms >= 1] print(len(cur_data)) cur_data =", "cur_data = cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data))", "as pd import geopandas def compile_airbnb_data(cur_link_table): cur_tables = [] for cur_row in cur_link_table.itertuples():", "= [\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns = [\"property_type\"])", "cur_data.bedrooms < 5 ] print(len(cur_data)) cur_data = cur_data[ cur_data.beds > 0 ] cur_data", "cur_data = cur_data.drop(columns=[\"host_id\", \"first_review\", \"last_review\"]) print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"zipcode\"].nunique() cur_selector = cur_selector[ cur_selector", "on=\"id\", how=\"right\") cur_data = geopandas.GeoDataFrame( cur_data, geometry=geopandas.points_from_xy( cur_data.longitude, cur_data.latitude ) ) cur_data =", "0 ] cur_data = cur_data[ cur_data.bedrooms < 5 ] print(len(cur_data)) cur_data = cur_data[", "\"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data =", "cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])] cur_data[\"is_brooklyn\"] = cur_data.neighbourhood_group_cleansed == \"Brooklyn\" cur_data = cur_data.drop(columns = [\"neighbourhood_group_cleansed\"]) print(len(cur_data))", "home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data", "cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"], ascending=False).reset_index(drop=True) cur_data = cur_data.drop(columns=[\"host_id\", \"first_review\", \"last_review\"]) print(len(cur_data)) cur_selector =", "[\"property_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = 
cur_data.drop(columns = [\"bed_type\"])", "'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ]", "= cur_data.drop(columns = [\"property_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data =", "== \"Real Bed\"] cur_data = cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\",", "cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])] cur_data[\"is_brooklyn\"] = cur_data.neighbourhood_group_cleansed == \"Brooklyn\" cur_data =", "tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table) cur_data = pd.concat(cur_tables)", "cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns = [\"property_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.bed_type ==", "cur_data = cur_data[cur_data.bathrooms >= 1] print(len(cur_data)) cur_data = cur_data[ cur_data.bedrooms > 0 ]", "= cur_data[ cur_data.bedrooms > 0 ] cur_data = cur_data[ cur_data.bedrooms < 5 ]", "7 ] print(len(cur_data)) cur_data = cur_data[ cur_data.number_of_reviews > 5 ] cur_data = cur_data.drop(columns=[\"number_of_reviews\"])", "geopandas def compile_airbnb_data(cur_link_table): cur_tables = [] for cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy()", "= cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"] > 25] print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"id\"].count()", "cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data)) cur_data", "] cur_data = cur_data[ cur_data.bedrooms < 5 ] print(len(cur_data)) cur_data = cur_data[ cur_data.beds", "= [\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\",", "print(len(cur_data)) cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data", "] print(len(cur_data)) cur_data = cur_data[ cur_data.number_of_reviews > 5 ] cur_data = cur_data.drop(columns=[\"number_of_reviews\"]) print(len(cur_data))", "1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"]", "cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0])", "cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"] > 25] print(len(cur_data))", "cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data =", "cur_data.sort_values(by=[\"id\", \"datetime\"], 
ascending=False).reset_index(drop=True) cur_data = cur_data.drop(columns=[\"host_id\", \"first_review\", \"last_review\"]) print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"zipcode\"].nunique() cur_selector", "cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"] >", "cur_data = cur_data[cur_data.room_type == \"Entire home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data", "[\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int)", "cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] <", "cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"] > 25] print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"id\"].count() cur_selector", "cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])] cur_data[\"is_brooklyn\"] = cur_data.neighbourhood_group_cleansed == \"Brooklyn\" cur_data = cur_data.drop(columns =", "\"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250]", "print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact", "= cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table) cur_data", "cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] =", "> 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data =", "print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"]", "cur_row.year tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table) cur_data = pd.concat(cur_tables) cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"], ascending=False).reset_index(drop=True)", "cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\",", "3 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) replaced_columns = [ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates',", "\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data =", "cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0]) 
cur_data[\"zipcode\"] = cur_data[\"zipcode\"].astype(\"int\") return cur_data", "cur_data = pd.concat(cur_tables) cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"], ascending=False).reset_index(drop=True) cur_data = cur_data.drop(columns=[\"host_id\", \"first_review\", \"last_review\"])", "firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on=\"id\", how=\"right\") cur_data = geopandas.GeoDataFrame( cur_data, geometry=geopandas.points_from_xy(", "cur_data = cur_data[cur_data[\"price\"] > 25] print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"id\"].count() cur_selector = cur_selector[ cur_selector", "cur_data = cur_data[ cur_data.bedrooms < 5 ] print(len(cur_data)) cur_data = cur_data[ cur_data.beds >", "= cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data))", "\"last_review\"]) print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"zipcode\"].nunique() cur_selector = cur_selector[ cur_selector == 1 ] cur_data", "= [\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms >=", "5 ] print(len(cur_data)) cur_data = cur_data[ cur_data.beds > 0 ] cur_data = cur_data[", "cur_data.drop(columns = [\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms", "cur_data[ cur_data.beds < 7 ] print(len(cur_data)) cur_data = cur_data[ cur_data.number_of_reviews > 5 ]", "cur_selector = cur_data.groupby(\"id\")[\"zipcode\"].nunique() cur_selector = cur_selector[ cur_selector == 1 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)]", "'is_location_exact', \"datetime\" ] firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on=\"id\", how=\"right\") cur_data =", "== \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])] cur_data[\"is_brooklyn\"] = cur_data.neighbourhood_group_cleansed", "9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms >= 1] print(len(cur_data)) cur_data = cur_data[ cur_data.bedrooms >", "cur_data.drop(columns = [\"property_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = cur_data.drop(columns", "\"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data =", "'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ] firsts_table =", "cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms >= 1] print(len(cur_data)) cur_data", "= cur_data[ cur_data.beds > 0 ] cur_data = cur_data[ cur_data.beds < 7 ]", "in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year tmp_table[\"datetime\"] =", "cur_data[cur_data.bathrooms >= 1] print(len(cur_data)) cur_data = 
cur_data[ cur_data.bedrooms > 0 ] cur_data =", "25] print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"id\"].count() cur_selector = cur_selector[ cur_selector > 3 ] cur_data", "] print(len(cur_data)) cur_data = cur_data[ cur_data.beds > 0 ] cur_data = cur_data[ cur_data.beds", "'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ] firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on=\"id\",", "cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"])", "= cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data", "= cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])] cur_data[\"is_brooklyn\"] = cur_data.neighbourhood_group_cleansed == \"Brooklyn\" cur_data", "cur_data = cur_data[ cur_data.number_of_reviews > 5 ] cur_data = cur_data.drop(columns=[\"number_of_reviews\"]) print(len(cur_data)) cur_data =", "cur_data.neighbourhood_group_cleansed == \"Brooklyn\" cur_data = cur_data.drop(columns = [\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates <", "cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0]) cur_data[\"zipcode\"] =", "= [] for cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"]", "print(len(cur_data)) cur_data = cur_data[ cur_data.beds > 0 ] cur_data = cur_data[ cur_data.beds <", "print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"id\"].count() cur_selector = cur_selector[ cur_selector > 3 ] cur_data =", "cur_selector = cur_data.groupby(\"id\")[\"id\"].count() cur_selector = cur_selector[ cur_selector > 3 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)]", "cur_data = geopandas.GeoDataFrame( cur_data, geometry=geopandas.points_from_xy( cur_data.longitude, cur_data.latitude ) ) cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"])", "cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data =", "print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\",", "= cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list:", "[\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns = [\"property_type\"]) print(len(cur_data))", "print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns = [\"property_type\"]) print(len(cur_data)) cur_data", "cur_data = 
cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) cur_data = cur_data[cur_data.room_type == \"Entire home/apt\"] cur_data = cur_data.drop(columns", "cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year tmp_table[\"datetime\"]", "\"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data", "= cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"]", "print(len(cur_data)) cur_data = cur_data.dropna(subset=[\"zipcode\", \"beds\", \"bedrooms\", \"bathrooms\"]) print(len(cur_data)) cur_data[\"price\"] = cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data", "cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating >", "print(len(cur_data)) cur_data = cur_data[cur_data.room_type == \"Entire home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data))", "> 5 ] cur_data = cur_data.drop(columns=[\"number_of_reviews\"]) print(len(cur_data)) cur_data = cur_data[ cur_data.reviews_per_month > 1/8", "= cur_data[cur_data.room_type == \"Entire home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data =", "cur_data = cur_data[ cur_data.beds > 0 ] cur_data = cur_data[ cur_data.beds < 7", "= cur_data.sort_values(by=[\"id\", \"datetime\"], ascending=False).reset_index(drop=True) cur_data = cur_data.drop(columns=[\"host_id\", \"first_review\", \"last_review\"]) print(len(cur_data)) cur_selector = cur_data.groupby(\"id\")[\"zipcode\"].nunique()", "60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"])", "cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"]) cur_data", "cur_data = cur_data[ cur_data.beds < 7 ] print(len(cur_data)) cur_data = cur_data[ cur_data.number_of_reviews >", "cur_data.bedrooms > 0 ] cur_data = cur_data[ cur_data.bedrooms < 5 ] print(len(cur_data)) cur_data", "[\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data = cur_data[cur_data.bathrooms >= 1]", "= cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on=\"id\", how=\"right\") cur_data = geopandas.GeoDataFrame( cur_data, geometry=geopandas.points_from_xy( cur_data.longitude,", "= cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data)) replaced_columns = [ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds',", "tmp_table[\"datetime\"] = cur_row.datetime cur_tables.append(tmp_table) cur_data = pd.concat(cur_tables) cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"], ascending=False).reset_index(drop=True) cur_data", "] cur_data = cur_data[ cur_data.beds < 7 ] print(len(cur_data)) cur_data = cur_data[ 
cur_data.number_of_reviews", "= cur_data.groupby(\"id\")[\"id\"].count() cur_selector = cur_selector[ cur_selector > 3 ] cur_data = cur_data[cur_data.id.isin(cur_selector.index)] print(len(cur_data))", "= cur_data.drop(columns = [\"neighbourhood_group_cleansed\"]) print(len(cur_data)) cur_data = cur_data[cur_data.accommodates < 9] print(len(cur_data)) cur_data =", ") cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data[cur_data.review_scores_rating", "cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data", "cur_data = cur_data[cur_data.is_location_exact == \"t\"] cur_data = cur_data.drop(columns=[\"is_location_exact\"]) print(len(cur_data)) cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin([\"Manhattan\", \"Brooklyn\"])]", "print(len(cur_data)) replaced_columns = [ 'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating',", "= cur_data[ cur_data.reviews_per_month > 1/8 ] cur_data = cur_data.drop(columns=[\"reviews_per_month\"]) print(len(cur_data)) cur_data = cur_data.drop(columns=[\"datetime\"])", "'beds', 'number_of_reviews', 'review_scores_rating', 'reviews_per_month', 'is_location_exact', \"datetime\" ] firsts_table = cur_data.groupby(\"id\").first()[replaced_columns] cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table,", "= cur_data.price.str.replace(r\"[\\$\\,]\", \"\").astype(float).round().astype(int) cur_data = cur_data[cur_data[\"price\"] < 1250] cur_data = cur_data[cur_data[\"price\"] > 25]", "1] print(len(cur_data)) cur_data = cur_data[ cur_data.bedrooms > 0 ] cur_data = cur_data[ cur_data.bedrooms", "cur_data.drop(columns=[\"datetime\"]) cur_data = cur_data.reset_index(drop=True) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].str.split(\"-\").map(lambda work_list: work_list[0]) cur_data[\"zipcode\"] = cur_data[\"zipcode\"].astype(\"int\") return", ">= 1] print(len(cur_data)) cur_data = cur_data[ cur_data.bedrooms > 0 ] cur_data = cur_data[", "cur_tables = [] for cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month", "print(len(cur_data)) cur_data = cur_data[cur_data.bed_type == \"Real Bed\"] cur_data = cur_data.drop(columns = [\"bed_type\"]) print(len(cur_data))", "cur_data = cur_data[cur_data.review_scores_rating > 60] cur_data = cur_data.drop(columns=[\"review_scores_rating\"]) print(len(cur_data)) cur_data = cur_data[cur_data.is_location_exact ==", "== \"Entire home/apt\"] cur_data = cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type ==", "cur_data.latitude ) ) cur_data = cur_data.drop(columns=[\"longitude\", \"latitude\"]) cur_data = cur_data.dropna(subset=[\"review_scores_rating\", \"reviews_per_month\"]) print(len(cur_data)) cur_data", "= cur_data.drop(columns = [\"room_type\"]) print(len(cur_data)) cur_data = cur_data[cur_data.property_type == \"Apartment\"] cur_data = cur_data.drop(columns", "for cur_row in cur_link_table.itertuples(): tmp_table = cur_row.table.copy() tmp_table[\"month\"] = cur_row.month tmp_table[\"year\"] = cur_row.year", "cur_tables.append(tmp_table) cur_data = pd.concat(cur_tables) cur_data = cur_data.sort_values(by=[\"id\", \"datetime\"], 
import pandas as pd
import geopandas


def compile_airbnb_data(cur_link_table):
    # Stack the per-month listings tables into one frame, tagging each row with its date.
    cur_tables = []
    for cur_row in cur_link_table.itertuples():
        tmp_table = cur_row.table.copy()
        tmp_table["month"] = cur_row.month
        tmp_table["year"] = cur_row.year
        tmp_table["datetime"] = cur_row.datetime
        cur_tables.append(tmp_table)
    cur_data = pd.concat(cur_tables)
    cur_data = cur_data.sort_values(by=["id", "datetime"], ascending=False).reset_index(drop=True)
    cur_data = cur_data.drop(columns=["host_id", "first_review", "last_review"])
    print(len(cur_data))

    # Keep listings whose zipcode is stable across all months.
    cur_selector = cur_data.groupby("id")["zipcode"].nunique()
    cur_selector = cur_selector[cur_selector == 1]
    cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
    print(len(cur_data))

    # Entire apartments with real beds only.
    cur_data = cur_data[cur_data.room_type == "Entire home/apt"]
    cur_data = cur_data.drop(columns=["room_type"])
    print(len(cur_data))
    cur_data = cur_data[cur_data.property_type == "Apartment"]
    cur_data = cur_data.drop(columns=["property_type"])
    print(len(cur_data))
    cur_data = cur_data[cur_data.bed_type == "Real Bed"]
    cur_data = cur_data.drop(columns=["bed_type"])
    print(len(cur_data))

    cur_data = cur_data.dropna(subset=["zipcode", "beds", "bedrooms", "bathrooms"])
    print(len(cur_data))

    # Parse prices and trim extreme values.
    cur_data["price"] = cur_data.price.str.replace(r"[\$\,]", "").astype(float).round().astype(int)
    cur_data = cur_data[cur_data["price"] < 1250]
    cur_data = cur_data[cur_data["price"] > 25]
    print(len(cur_data))

    # Keep listings observed more than three times.
    cur_selector = cur_data.groupby("id")["id"].count()
    cur_selector = cur_selector[cur_selector > 3]
    cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
    print(len(cur_data))

    # Replace slowly-changing attributes with their first observed value per listing.
    replaced_columns = [
        'neighbourhood_group_cleansed', 'latitude', 'longitude', 'accommodates',
        'bathrooms', 'bedrooms', 'beds', 'number_of_reviews', 'review_scores_rating',
        'reviews_per_month', 'is_location_exact', "datetime"
    ]
    firsts_table = cur_data.groupby("id").first()[replaced_columns]
    cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on="id", how="right")

    # Attach point geometries and drop the raw coordinates.
    cur_data = geopandas.GeoDataFrame(
        cur_data, geometry=geopandas.points_from_xy(cur_data.longitude, cur_data.latitude)
    )
    cur_data = cur_data.drop(columns=["longitude", "latitude"])

    cur_data = cur_data.dropna(subset=["review_scores_rating", "reviews_per_month"])
    print(len(cur_data))
    cur_data = cur_data[cur_data.review_scores_rating > 60]
    cur_data = cur_data.drop(columns=["review_scores_rating"])
    print(len(cur_data))
    cur_data = cur_data[cur_data.is_location_exact == "t"]
    cur_data = cur_data.drop(columns=["is_location_exact"])
    print(len(cur_data))

    # Manhattan and Brooklyn only, encoded as a boolean flag.
    cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin(["Manhattan", "Brooklyn"])]
    cur_data["is_brooklyn"] = cur_data.neighbourhood_group_cleansed == "Brooklyn"
    cur_data = cur_data.drop(columns=["neighbourhood_group_cleansed"])
    print(len(cur_data))

    # Size and review-volume filters.
    cur_data = cur_data[cur_data.accommodates < 9]
    print(len(cur_data))
    cur_data = cur_data[cur_data.bathrooms >= 1]
    print(len(cur_data))
    cur_data = cur_data[cur_data.bedrooms > 0]
    cur_data = cur_data[cur_data.bedrooms < 5]
    print(len(cur_data))
    cur_data = cur_data[cur_data.beds > 0]
    cur_data = cur_data[cur_data.beds < 7]
    print(len(cur_data))
    cur_data = cur_data[cur_data.number_of_reviews > 5]
    cur_data = cur_data.drop(columns=["number_of_reviews"])
    print(len(cur_data))
    cur_data = cur_data[cur_data.reviews_per_month > 1/8]
    cur_data = cur_data.drop(columns=["reviews_per_month"])
    print(len(cur_data))

    cur_data = cur_data.drop(columns=["datetime"])
    cur_data = cur_data.reset_index(drop=True)
    cur_data["zipcode"] = cur_data["zipcode"].str.split("-").map(lambda ...
[ ".decoder import TransformerDecoder # noqa 401 from .encoder import TransformerEncoder # noqa 401", "<reponame>bsm8734/formula-image-latex-recognition from .decoder import TransformerDecoder # noqa 401 from .encoder import TransformerEncoder #", "from .decoder import TransformerDecoder # noqa 401 from .encoder import TransformerEncoder # noqa" ]
[ "app.core.cfg import cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app = create_app() def", "'kclark' logger = logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost',", "create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running')", "Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running') if __name__ == '__main__':", "__author__ = 'kclark' logger = logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server", "from app.core.cfg import cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app = create_app()", "logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode)", "app = create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App", "from app.core.app import create_app from app.core.cfg import cfg __author__ = 'kclark' logger =", "logging from app.core.app import create_app from app.core.cfg import cfg __author__ = 'kclark' logger", "cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App", "Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running') if __name__ == '__main__': run_app()", "logger = logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000,", "= create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server", "import cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app = create_app() def run_app():", "import create_app from app.core.cfg import cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app", "= 'kclark' logger = logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server Initializing')", "def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running') if", "run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running') if __name__", "import logging from app.core.app import create_app from app.core.cfg import cfg __author__ = 'kclark'", "logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode) logger.info('App Server Running') if __name__ ==", "create_app from app.core.cfg import cfg __author__ = 'kclark' logger = logging.getLogger(__name__) app =", "app.core.app import create_app from app.core.cfg import cfg __author__ = 'kclark' logger = logging.getLogger(__name__)", "= logging.getLogger(__name__) app = create_app() def run_app(): logger.info('App Server Initializing') app.run(host='localhost', port=5000, threaded=True," ]
[ "\"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) }", "def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data)", "plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding(): encoding =", "encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding(): encoding", "= createObject(size, size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data", "encoding = Encoding('bson') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded =", "encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded =", "== decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) =", "= bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] = min_lc + b %", "obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom),", "enumerate(ba): ba[i] = min_lc + b % len_lc # convert 0..255 to 97..122", "data == decoded def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size) (header,", "test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded", "data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert", "value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size)", "encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data =", "ba[i] = min_lc + b % len_lc # convert 0..255 to 97..122 return", "payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes():", "encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding =", "createObject(size, size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data ==", "value=payload) assert data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size,", "26 ba = bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] = min_lc +", "<reponame>kube-HPC/python-wrapper.hkube import os import random from hkube_python_wrapper.util.encoding import Encoding size = 1 *", "bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": {", "decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc", "test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded", "value=header + payload) assert data == 
decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes)", "import os import random from hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024", "encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data == decoded def create_bytearray(sizeBytes):", "Encoding('bson') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload)", "decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack')", "size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded", "encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data =", "ba = bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] = min_lc + b", "ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = {", "= encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding(): encoding", "convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def", "encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding =", "payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes():", "payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_no_header_object():", "= encoding.decode(header=None, value=header + payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding =", "encoding.decode(header=None, value=header + payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack')", "decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack')", "= 26 ba = bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] = min_lc", "= { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\":", "os import random from hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024 def", "data == decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc =", "\"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\":", "\"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def", "value=header + payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data", "decoded def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size) (header, payload) =", "from hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024 def test_none_encoding(): encoding =", "Encoding('msgpack') data = create_bytearray(size) (header, payload) = 
encoding.encode(data) decoded = encoding.decode(header=None, value=header +", "= encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding():", "plainEncode=True) assert data == decoded def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size,", "(sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for", "= Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None,", "} } return obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom),", "= Encoding('bson') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header,", "data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header +", "data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data", "= encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data", "size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded", "in enumerate(ba): ba[i] = min_lc + b % len_lc # convert 0..255 to", "{ \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj =", "randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom),", "random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)),", "== decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload)", "bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] = min_lc + b % len_lc", "decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) =", "97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj", "0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes,", "= encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data", "data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload)", "def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\":", "len_lc = 26 ba = bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i] =", "return obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom),", "encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None,", "createObject(size, size) (header, 
payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data ==", "False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom):", "test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size, size) (_, payload) = encoding.encode(data) decoded", "(header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def", "\"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\":", "def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is None", "encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding =", "decoded = encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding(): encoding = Encoding('json')", "= createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data", "encoding = Encoding('msgpack') data = createObject(size, size) (_, payload) = encoding.encode(data) decoded =", "decoded = encoding.decode(header=None, value=header + payload) assert data == decoded def create_bytearray(sizeBytes): return", "encoding.decode(header=None, value=header + payload) assert data == decoded def create_bytearray(sizeBytes): return b'\\xdd' *", "data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header,", "== decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload)", "\"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj", "def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded", "b in enumerate(ba): ba[i] = min_lc + b % len_lc # convert 0..255", "def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False,", "= { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\":", "b % len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return", "data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data", "for i, b in enumerate(ba): ba[i] = min_lc + b % len_lc #", "Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding(): encoding =", "= encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding(): encoding = Encoding('json') data", "assert decoded is None def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded", "% len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0,", 
"test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded =", "decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data)", "(_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def", "Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert", "= 1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None)", "encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded def", "encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data =", "bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False,", "decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size, size) (_, payload) =", "randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } }", "def randomString(n): min_lc = ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for i,", "} return obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\":", "test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded =", "data = createObject(size, size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert", "{ \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom),", "\"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj = {", "== decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size, size) (_, payload)", "assert data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header,", "encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None,", "= encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data == decoded def", "decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack')", "sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes),", "value=payload) assert data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size)", "encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding(): encoding = Encoding('bson') data =", 
"createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\":", "data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size, size) (_,", "assert data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size)", "decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack')", "return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' *", "obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False,", "= Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header,", "1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is", "decoded = encoding.decode(header=None, value=header + payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding", "sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\":", "== decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) =", "+ b % len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes):", "+ payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data =", "= min_lc + b % len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\")", "decoded is None def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded =", "1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert", "= createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload)", "def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data)", "payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding():", "createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert", "False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj", "= Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True)", "+ payload) assert data == decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def", "\"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return", "size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data", "= encoding.encode(data) decoded = encoding.decode(header=None, 
value=payload) assert data == decoded def test_encoding_no_header_object(): encoding", "min_lc + b % len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\") def", "data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload)", "assert data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_,", "encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data =", "value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size,", "def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded", "test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded", "assert data == decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc", "b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc = 26 ba =", "encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding(): encoding =", "def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc =", "test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded =", "= encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding", "sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\":", "value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size)", "== decoded def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size) (header, payload)", "= encoding.decode(header=header, value=payload) assert data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data", "value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size)", "Encoding size = 1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded =", "i, b in enumerate(ba): ba[i] = min_lc + b % len_lc # convert", "def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded", "= Encoding('msgpack') data = createObject(size, size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None,", "encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_no_header_object(): encoding =", "create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data", "= encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data", "create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc = 26", "return ba.decode(\"utf-8\") def randomInt(sizeBytes): return 
random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj =", "encoding = Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded,", "\"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\":", "randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\":", "test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded =", "= encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding(): encoding = Encoding('bson') data", "createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded", "= encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data", "= encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding", "value=None) assert decoded is None def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size)", "randomString(n): min_lc = ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for i, b", "randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": { \"dataString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom)", "obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\":", "= createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data ==", "payload) assert data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size,", "= Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding(): encoding", "create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded", "decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header, payload) =", "= create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert", "= create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data ==", "encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data =", "data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload)", "* (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n))", "= ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for i, b in enumerate(ba):", "payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data 
== decoded def test_encoding_header_payload_object():", "randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd'", "payload) assert data == decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n):", "randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj = { \"randomString\":", "= encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data", "decoded = encoding.decode(value=encoded, plainEncode=True) assert data == decoded def test_bson_encoding(): encoding = Encoding('bson')", "= encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_object(): encoding", "\"randomIntArray\": randomInt(sizeRandom) } } return obj def createObjectJson(sizeRandom): obj = { \"randomString\": randomString(sizeRandom),", "Encoding('msgpack') data = createObject(size, size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload)", "data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload)", "\"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\":", "size = 1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None,", "assert data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header,", "= Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload)", "min_lc = ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for i, b in", "== decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload) =", "test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded =", "return b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a') len_lc = 26 ba", "Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert", "payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_header_payload_bytes():", "decoded = encoding.decode(header=header, value=payload) assert data == decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack')", "* 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded", "data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded = encoding.decode(value=encoded, plainEncode=True) assert data", "def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded", "* (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, 
\"anotherBool\":", "== decoded def create_bytearray(sizeBytes): return b'\\xdd' * (sizeBytes) def randomString(n): min_lc = ord(b'a')", "assert data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data = createObject(size, size)", "decoded def test_encoding_no_header_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data)", "Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header", "hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack')", "= encoding.decode(header=None, value=header + payload) assert data == decoded def create_bytearray(sizeBytes): return b'\\xdd'", "data == decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload)", "= encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding", "== decoded def test_msgpack_encoding(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) =", "random from hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024 def test_none_encoding(): encoding", "encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding(): encoding = Encoding('json') data =", "def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size) (header, payload) = encoding.encode(data)", "(sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False,", "create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded", "encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding =", "import random from hkube_python_wrapper.util.encoding import Encoding size = 1 * 1024 def test_none_encoding():", "def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True) decoded", "len_lc # convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes),", "decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data)", "= Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header", "ord(b'a') len_lc = 26 ba = bytearray(os.urandom(n)) for i, b in enumerate(ba): ba[i]", "Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert", "encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is None def test_json_encoding():", "None def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data, plainEncode=True)", "test_none_encoding(): encoding = Encoding('msgpack') decoded = encoding.decode(header=None, value=None) assert decoded is None def", "def test_encoding_no_header_object(): encoding = Encoding('msgpack') data 
= createObject(size, size) (_, payload) = encoding.encode(data)", "decoded def test_encoding_header_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data)", "assert data == decoded def test_encoding_header_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size)", "Encoding('msgpack') data = createObject(size, size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header, value=payload)", "decoded = encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack')", "data == decoded def test_encoding_header_in_payload_object(): encoding = Encoding('msgpack') data = createObject(size, size) (header,", "= create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload) assert data ==", "encoding.decode(header=None, value=payload) assert data == decoded def test_encoding_no_header_object(): encoding = Encoding('msgpack') data =", "import Encoding size = 1 * 1024 def test_none_encoding(): encoding = Encoding('msgpack') decoded", "encoding = Encoding('msgpack') data = create_bytearray(size) (header, payload) = encoding.encode(data) decoded = encoding.decode(header=header,", "= Encoding('msgpack') data = create_bytearray(size) (_, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=payload)", "(header, payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data ==", "{ \"randomString\": randomString(sizeRandom), \"randomIntArray\": randomInt(sizeRandom), \"dataString\": randomString(sizeRandom), \"bool\": False, \"anotherBool\": False, \"nestedObj\": {", "def createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\":", "payload) = encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data == decoded", "assert data == decoded def test_bson_encoding(): encoding = Encoding('bson') data = createObject(size, size)", "createObject(sizeBytes, sizeRandom): obj = { \"bytesData\": bytearray(b'\\xdd' * (sizeBytes)), \"anotherBytesData\": bytearray(sizeBytes), \"randomString\": randomString(sizeRandom),", "assert data == decoded def test_encoding_header_in_payload_bytes(): encoding = Encoding('msgpack') data = create_bytearray(size) (header,", "to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes) def createObject(sizeBytes, sizeRandom):", "# convert 0..255 to 97..122 return ba.decode(\"utf-8\") def randomInt(sizeBytes): return random.sample(range(0, sizeBytes), sizeBytes)", "encoding.encode(data) decoded = encoding.decode(header=None, value=header + payload) assert data == decoded def test_encoding_header_in_payload_object():", "is None def test_json_encoding(): encoding = Encoding('json') data = createObjectJson(size) encoded = encoding.encode(data," ]
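The tests above pin down the Encoding contract they exercise: encode() returns a (header, payload) pair and decode() accepts the header either separately or prepended to the payload. A stand-alone round trip mirroring that usage (the sample dict is arbitrary, chosen only for illustration):

from hkube_python_wrapper.util.encoding import Encoding

encoding = Encoding('msgpack')
message = {"greeting": "hello", "values": [1, 2, 3]}

header, payload = encoding.encode(message)
assert encoding.decode(header=header, value=payload) == message
assert encoding.decode(header=None, value=header + payload) == message
print("round trip ok")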
[ "# figure out how many of the top rows are column headers column_header_rows", "have a different condition because we have cut out some rows if _row_or_col_is_header(s_count,", "for j in zip(*row_or_col_list)] def _get_and_increment_last(l): \"\"\" Utility function for subdivide \"\"\" if", "j] = categorize_cell_string(table[i, j]) # figure out how many of the top rows", "elif stack[-1][-1] >= curr[0]: prev = stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged)", "for subdivide \"\"\" if row_flag: row_or_col_list = [table[i, :] for i in inds]", "preserves order \"\"\" seen = set() seen_add = seen.add return [x for x", "len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds,", "row_headers[i] + ' - ' + row_headers[append_row] row_headers = [row_headers[i] for i in", "(s_count + 1) / (v_count + s_count + 1) >= 2. / 3.", "_row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic for whether a row/col is", "= np.where(empty_cols > 0.9)[0] ind_ranges_to_merge = [[i - 1, i] for i in", "or empty - Figure out which of the top rows are column headers", "in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if rows of the numerical", "that are not contiguous with the top columns? # get headers column_headers =", "a numpy array TODO: Common problem: \"n (%)\" columns are often split up", "only care about the rows/columns that span the numerical subtable column_headers = column_headers[first_non_header_col_ind:]", "_get_and_increment_last(column_header_rows) row_header_columns = [] for i in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:, i]", "np.nan np.nan Male 50 30 Female 30 20 For this case, the strong", "we only care about the rows/columns that span the numerical subtable column_headers =", "20 For this case, the strong 'Sex' is pre-pended to 'Male' and 'Female'", "stack.append(merged) else: prev = stack.pop() result.append(prev) stack.append(curr) result += stack return result def", "[' '.join(_unique_sorted([str(k) for k in j])).strip() for j in zip(*row_or_col_list)] def _get_and_increment_last(l): \"\"\"", "column headers column_header_rows = [] for i in range(0, table.nrow): # sometimes the", "[numerical_columns[i] for i in ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)]", "for i in ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)] numerical_columns_merged.append(merged_cols)", "s_count + 1) >= 2. / 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for", "'S') v_count = np.sum(table_categories[i, :] == 'V') if all_rows_flag or _row_or_col_is_header(s_count, v_count): column_header_rows.append(i)", "For this case, the strong 'Sex' is pre-pended to 'Male' and 'Female' to", "for subdivide Some rows headers actually apply to subsequent rows. 
E.g.: Sex np.nan", "in ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable", "TODO: maybe find other columns that are not contiguous with the top columns?", "'%'/'(%)' respectively, then they should be concatenated \"\"\" # first, categorize each cell", "merge columns to previous one if the column is mostly empty empty_cols =", "else: break # as soon as this is false, we quit # TODO:", "30 20 For this case, the strong 'Sex' is pre-pended to 'Male' and", "the column that isn't the header col = [str(i) for i in table[:,", "len(g)))) i += len(g) # combine overlapping merging index ranges ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge)", "np.sum(table_categories[i, :] == 'V') if all_rows_flag or _row_or_col_is_header(s_count, v_count): column_header_rows.append(i) else: break #", "headers # until the next empty set of rows # also sometimes there", "they have the same headers i = 0 for k, g in groupby(column_headers):", "same headers i = 0 for k, g in groupby(column_headers): g = list(g)", "rows are column headers column_header_rows = [] for i in range(0, table.nrow): #", "row headers, so we have to ensure the lens match if len(numerical_subtable) >", "curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif stack[-1][-1] >= curr[0]: prev", "[] column_headers_merged = [] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i", "30 Sex - Female 30 20 \"\"\" empty_flag = (numerical_subtable == '').mean(1) ==", "previous one if the column is mostly empty empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:]", "for subdivide \"\"\" if len(l) > 0: return l[-1] + 1 else: return", "merging index ranges ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge) # perform the merge # note: only", "np.sum(table_categories[first_non_header_row_ind:, i] == 'S') v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V') # TODO: maybe", "1)] for i, append_rows in zip(empty_rows, all_append_rows): for append_row in append_rows: row_headers[append_row] =", "strong 'Sex' is pre-pended to 'Male' and 'Female' to get: Sex - Male", "column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility function for subdivide \"\"\"", "top rows are column headers -> combine them - Figure out which of", "there are no column header rows if len(column_headers) == 0: column_headers = ['col_'", "for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care about the rows/columns", "false, we quit # TODO: maybe find other rows that are not contiguous", "s_count = np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i, :] == 'V') if", "or not. 
\"\"\" if s_count == 1 and v_count == 1: return False", "prev = stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev = stack.pop()", "len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers, numerical_subtable def", "append_rows: row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row] row_headers = [row_headers[i]", "in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide Some", "ranges ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge) # perform the merge # note: only merge the", "maybe find other columns that are not contiguous with the top columns? #", "table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care about the rows/columns that span the", "function for subdivide \"\"\" if row_flag: row_or_col_list = [table[i, :] for i in", "[] result = [] for curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr)", "- ' + row_headers[append_row] row_headers = [row_headers[i] for i in non_empty_rows] numerical_subtable =", "rows if len(column_headers) == 0: column_headers = ['col_' + str(i) for i in", "get: Sex - Male 50 30 Sex - Female 30 20 \"\"\" empty_flag", "cut out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else: break # TODO: maybe", "+ 1])) for i in range(len(empty_rows) - 1)] for i, append_rows in zip(empty_rows,", "append_rows in zip(empty_rows, all_append_rows): for append_row in append_rows: row_headers[append_row] = row_headers[i] + '", "contiguous with the top rows? # figure out how many of the leftmost", "== table.ncol) # check if the number of strings is more than 2/3s", "else: prev = stack.pop() result.append(prev) stack.append(curr) result += stack return result def _unique_sorted(seq):", "Male 50 30 Sex - Female 30 20 \"\"\" empty_flag = (numerical_subtable ==", "numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if rows of the numerical subtable are", "from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\" - Categorize each cell as string,", "= _combine_omnipage_cell_list(table, row_header_columns, row_flag=False) # edge case if there are no column header", "[list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows) - 1)] for", "'E').mean(0)[first_non_header_col_ind:] empty_col_inds = np.where(empty_cols > 0.9)[0] ind_ranges_to_merge = [[i - 1, i] for", "col = [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care", "in j])).strip() for j in zip(*row_or_col_list)] def _get_and_increment_last(l): \"\"\" Utility function for subdivide", "a row/col is a header or not. \"\"\" if s_count == 1 and", "the leftmost columns are row headers -> combine them - Put the remaining", "with the top rows? 
# figure out how many of the leftmost columns", "= 0 for k, g in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i +", "row_flag=True) row_headers = _combine_omnipage_cell_list(table, row_header_columns, row_flag=False) # edge case if there are no", "= [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T #", "row_headers = row_headers[first_non_header_row_ind:] # merge columns to previous one if the column is", "_row_or_col_is_header(s_count, v_count): column_header_rows.append(i) else: break # as soon as this is false, we", "['col_' + str(i) for i in range(table.ncol)] # get numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns)", "numerical_subtable): \"\"\" Utility function for subdivide Some rows headers actually apply to subsequent", "apply to subsequent rows. E.g.: Sex np.nan np.nan Male 50 30 Female 30", "in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif stack[-1][-1] >= curr[0]: prev =", "headers, so we have to ensure the lens match if len(numerical_subtable) > 1", "header col = [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only", "then they should be concatenated \"\"\" # first, categorize each cell table_categories =", "we have cut out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else: break #", "= [] for i in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S')", "= [] for col in range(first_non_header_col_ind, table.ncol): # extract the part of the", "0] # merge columns if they have the same headers i = 0", "result = [] for curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif", "curr[0]: prev = stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev =", "break # as soon as this is false, we quit # TODO: maybe", "if i > 0] # merge columns if they have the same headers", "the part of the column that isn't the header col = [str(i) for", "columns are often split up by Omnipage! If two adjacent columns have column", "= np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i in range(table.nrow): for j in range(table.ncol): table_categories[i,", "v_count): column_header_rows.append(i) else: break # as soon as this is false, we quit", "one if the column is mostly empty empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:] empty_col_inds", "g = list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i += len(g) # combine overlapping", "values but preserves order \"\"\" seen = set() seen_add = seen.add return [x", "= ['col_' + str(i) for i in range(table.ncol)] # get numerical_subtable first_non_header_col_ind =", "[[i - 1, i] for i in empty_col_inds if i > 0] #", "= _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility", "+ s_count + 1) >= 2. / 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function", "to subsequent rows. 
E.g.: Sex np.nan np.nan Male 50 30 Female 30 20", "= _get_and_increment_last(row_header_columns) numerical_columns = [] for col in range(first_non_header_col_ind, table.ncol): # extract the", "empty_rows[-1] != len(row_headers): empty_rows.append(len(row_headers)) all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for", "column headers first_non_header_row_ind = _get_and_increment_last(column_header_rows) row_header_columns = [] for i in range(0, table.ncol):", "them - Figure out which of the leftmost columns are row headers ->", "# and splayed across many rows. detect that here: all_rows_flag = (table[i, 0].indices[-1][1]", "in range(first_non_header_col_ind, table.ncol): # extract the part of the column that isn't the", "= (numerical_subtable == '').mean(1) == 1 empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if", "this row's header can be appended to all the subsequent row headers #", "for i in range(table.ncol)] # get numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns) numerical_columns = []", "'Female' to get: Sex - Male 50 30 Sex - Female 30 20", "[] for curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif stack[-1][-1] >=", "len(row_headers): empty_rows.append(len(row_headers)) all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in", "the cell contents if they are not identical numerical_columns_merged = [] column_headers_merged =", "1 == table.ncol) # check if the number of strings is more than", "be concatenated \"\"\" # first, categorize each cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_)", "columns if they have the same headers i = 0 for k, g", "Put the remaining subtable into a numpy array TODO: Common problem: \"n (%)\"", "False else: return (s_count + 1) / (v_count + s_count + 1) >=", "remaining subtable into a numpy array TODO: Common problem: \"n (%)\" columns are", "out which of the top rows are column headers -> combine them -", "[4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack = [] result = [] for curr", "= [] for curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif stack[-1][-1]", "= np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i, :] == 'V') if all_rows_flag", "caption gets lobbed into the first column # and splayed across many rows.", "stack return result def _unique_sorted(seq): \"\"\" Utility function for subdivide Keeps unique values", "for k in j])).strip() for j in zip(*row_or_col_list)] def _get_and_increment_last(l): \"\"\" Utility function", "np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i, :] == 'V') if all_rows_flag or", "edge case if there are no column header rows if len(column_headers) == 0:", "contents if they are not identical numerical_columns_merged = [] column_headers_merged = [] for", "\"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack = [] result = [] for curr in", "them - Put the remaining subtable into a numpy array TODO: Common problem:", "headers first_non_header_row_ind = _get_and_increment_last(column_header_rows) row_header_columns = [] for i in range(0, table.ncol): s_count", "integer ranges. 
Example [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge", "for k, g in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i", "we quit # TODO: maybe find other rows that are not contiguous with", "= [numerical_columns[i] for i in ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j in", "# then this row's header can be appended to all the subsequent row", "pre-pended to 'Male' and 'Female' to get: Sex - Male 50 30 Sex", "= row_headers[first_non_header_row_ind:] # merge columns to previous one if the column is mostly", "row headers # until the next empty set of rows # also sometimes", "index ranges ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge) # perform the merge # note: only merge", ">= curr[0]: prev = stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev", "range(len(empty_rows) - 1)] for i, append_rows in zip(empty_rows, all_append_rows): for append_row in append_rows:", "that are not contiguous with the top rows? # figure out how many", "1, i] for i in empty_col_inds if i > 0] # merge columns", "return [' '.join(_unique_sorted([str(k) for k in j])).strip() for j in zip(*row_or_col_list)] def _get_and_increment_last(l):", "Example [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge)", "TODO: maybe find other rows that are not contiguous with the top rows?", "s_count == 1 and v_count == 1: return False else: return (s_count +", "stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev = stack.pop() result.append(prev) stack.append(curr)", "the top rows are column headers -> combine them - Figure out which", "in inds] else: row_or_col_list = [table[:, i] for i in inds] return ['", "= _combine_omnipage_cell_list(table, column_header_rows, row_flag=True) row_headers = _combine_omnipage_cell_list(table, row_header_columns, row_flag=False) # edge case if", "as this is false, we quit # TODO: maybe find other rows that", "headers column_header_rows = [] for i in range(0, table.nrow): # sometimes the caption", "[] for col in range(first_non_header_col_ind, table.ncol): # extract the part of the column", "import numpy as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\" - Categorize", "_append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide Some rows headers actually apply to", "= [] column_headers_merged = [] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for", "cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i in range(table.nrow): for j in", "= stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev = stack.pop() result.append(prev)", "def subdivide(table): \"\"\" - Categorize each cell as string, value, or empty -", "- Figure out which of the top rows are column headers -> combine", "figure out how many of the top rows are column headers column_header_rows =", "(table[i, 0].indices[-1][1] + 1 == table.ncol) # check if the number of strings", "in range(table.nrow): for j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure", "for i in range(len(empty_rows) - 1)] for i, append_rows in zip(empty_rows, all_append_rows): for", "# perform the merge # note: only merge the cell contents if they", "perform the merge # note: only merge the 
cell contents if they are", "Utility function for subdivide \"\"\" if len(l) > 0: return l[-1] + 1", "def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic for whether a row/col", "[3], [4,5], [5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack = []", "detect that here: all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol) # check", "subdivide \"\"\" if len(l) > 0: return l[-1] + 1 else: return 0", "else: break # TODO: maybe find other columns that are not contiguous with", "0: column_headers = ['col_' + str(i) for i in range(table.ncol)] # get numerical_subtable", "empty_col_inds = np.where(empty_cols > 0.9)[0] ind_ranges_to_merge = [[i - 1, i] for i", "respectively, then they should be concatenated \"\"\" # first, categorize each cell table_categories", "itertools import groupby import numpy as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table):", "that combines overlapping integer ranges. Example [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3],", "Keeps unique values but preserves order \"\"\" seen = set() seen_add = seen.add", "columns? # get headers column_headers = _combine_omnipage_cell_list(table, column_header_rows, row_flag=True) row_headers = _combine_omnipage_cell_list(table, row_header_columns,", "[] for i in range(0, table.nrow): # sometimes the caption gets lobbed into", "entire row s_count = np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i, :] ==", "== 0: column_headers = ['col_' + str(i) for i in range(table.ncol)] # get", "row_header_columns = [] for i in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:, i] ==", "return [x for x in seq if not (x in seen or seen_add(x))]", "Omnipage! If two adjacent columns have column headers that end with 'n' and", "and 'Female' to get: Sex - Male 50 30 Sex - Female 30", "out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else: break # TODO: maybe find", "for curr in ind_ranges_to_merge: if len(stack) == 0: stack.append(curr) elif stack[-1][-1] >= curr[0]:", "than 2/3s of the entire row s_count = np.sum(table_categories[i, :] == 'S') v_count", "== 'V') # TODO: maybe have a different condition because we have cut", "len(stack) == 0: stack.append(curr) elif stack[-1][-1] >= curr[0]: prev = stack.pop() merged =", "is a header or not. \"\"\" if s_count == 1 and v_count ==", "# get numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns) numerical_columns = [] for col in range(first_non_header_col_ind,", "for i in range(table.nrow): for j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j])", "i in empty_col_inds if i > 0] # merge columns if they have", "[2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack =", "\"\"\" # first, categorize each cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i", "\"\"\" Utility function for subdivide Keeps unique values but preserves order \"\"\" seen", "return (s_count + 1) / (v_count + s_count + 1) >= 2. 
/", "= stack.pop() result.append(prev) stack.append(curr) result += stack return result def _unique_sorted(seq): \"\"\" Utility", "i in range(0, table.nrow): # sometimes the caption gets lobbed into the first", "range(first_non_header_col_ind, table.ncol): # extract the part of the column that isn't the header", "the rows/columns that span the numerical subtable column_headers = column_headers[first_non_header_col_ind:] row_headers = row_headers[first_non_header_row_ind:]", "all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol) # check if the number", "range(0, table.nrow): # sometimes the caption gets lobbed into the first column #", "the top rows are column headers column_header_rows = [] for i in range(0,", "have the same headers i = 0 for k, g in groupby(column_headers): g", "in zip(empty_rows, all_append_rows): for append_row in append_rows: row_headers[append_row] = row_headers[i] + ' -", "i, append_rows in zip(empty_rows, all_append_rows): for append_row in append_rows: row_headers[append_row] = row_headers[i] +", "list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) > 0: if empty_rows[-1] != len(row_headers): empty_rows.append(len(row_headers))", "subtable are all empty # then this row's header can be appended to", "as soon as this is false, we quit # TODO: maybe find other", "= list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) > 0: if empty_rows[-1] != len(row_headers):", "in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care about the rows/columns that span", "cell contents if they are not identical numerical_columns_merged = [] column_headers_merged = []", "return column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility function for subdivide", "the column is mostly empty empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:] empty_col_inds = np.where(empty_cols", "else: row_or_col_list = [table[:, i] for i in inds] return [' '.join(_unique_sorted([str(k) for", "# first, categorize each cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i in", "rows. detect that here: all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol) #", "top rows? 
# figure out how many of the leftmost columns are row", "number of strings is more than 2/3s of the entire row s_count =", "or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide Some rows headers", "_combine_ind_ranges(ind_ranges_to_merge) # perform the merge # note: only merge the cell contents if", "curr))) stack.append(merged) else: prev = stack.pop() result.append(prev) stack.append(curr) result += stack return result", "columns are row headers -> combine them - Put the remaining subtable into", "1 empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) > 0: if empty_rows[-1]", "rows/columns that span the numerical subtable column_headers = column_headers[first_non_header_col_ind:] row_headers = row_headers[first_non_header_row_ind:] #", "-> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack = [] result = []", "seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide Some rows", "50 30 Sex - Female 30 20 \"\"\" empty_flag = (numerical_subtable == '').mean(1)", "' - ' + row_headers[append_row] row_headers = [row_headers[i] for i in non_empty_rows] numerical_subtable", "row_or_col_list = [table[i, :] for i in inds] else: row_or_col_list = [table[:, i]", "that here: all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol) # check if", "function for subdivide \"\"\" if len(l) > 0: return l[-1] + 1 else:", "if s_count == 1 and v_count == 1: return False else: return (s_count", "because we have cut out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else: break", "seq if not (x in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility", "# TODO: maybe find other columns that are not contiguous with the top", "empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) > 0: if empty_rows[-1] !=", "range(table.nrow): for j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure out", "rows that are not contiguous with the top rows? # figure out how", "categorize_cell_string def subdivide(table): \"\"\" - Categorize each cell as string, value, or empty", "in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure out how many of", "[table[i, :] for i in inds] else: row_or_col_list = [table[:, i] for i", "empty_rows.append(len(row_headers)) all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows)", "string, value, or empty - Figure out which of the top rows are", "Function that combines overlapping integer ranges. 
Example [[1,2,3], [2,3], [3], [4,5], [5]] ->", "is false, we quit # TODO: maybe find other rows that are not", "# if rows of the numerical subtable are all empty # then this", "i in range(table.nrow): for j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) #", "== 'S') v_count = np.sum(table_categories[i, :] == 'V') if all_rows_flag or _row_or_col_is_header(s_count, v_count):", "sometimes there are no row headers, so we have to ensure the lens", "in seq if not (x in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\"", "header rows if len(column_headers) == 0: column_headers = ['col_' + str(i) for i", "= list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i += len(g) # combine overlapping merging", "of the numerical subtable are all empty # then this row's header can", "[] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i in ind_range_to_merge] merged_cols", "# sometimes the caption gets lobbed into the first column # and splayed", "lobbed into the first column # and splayed across many rows. detect that", "with column headers first_non_header_row_ind = _get_and_increment_last(column_header_rows) row_header_columns = [] for i in range(0,", "_unique_sorted(seq): \"\"\" Utility function for subdivide Keeps unique values but preserves order \"\"\"", "headers that end with 'n' and '%'/'(%)' respectively, then they should be concatenated", "row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row] row_headers = [row_headers[i] for", "= [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care about", "headers -> combine them - Figure out which of the leftmost columns are", "result def _unique_sorted(seq): \"\"\" Utility function for subdivide Keeps unique values but preserves", "i + len(g)))) i += len(g) # combine overlapping merging index ranges ind_ranges_to_merge", "with 'n' and '%'/'(%)' respectively, then they should be concatenated \"\"\" # first,", "[table[:, i] for i in inds] return [' '.join(_unique_sorted([str(k) for k in j])).strip()", "rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else: break # TODO: maybe find other columns", "i += len(g) # combine overlapping merging index ranges ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge) #", "unique values but preserves order \"\"\" seen = set() seen_add = seen.add return", "are all empty # then this row's header can be appended to all", "maybe have a different condition because we have cut out some rows if", "column_headers = column_headers[first_non_header_col_ind:] row_headers = row_headers[first_non_header_row_ind:] # merge columns to previous one if", "case if there are no column header rows if len(column_headers) == 0: column_headers", "that isn't the header col = [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col)", "merged = sorted(list(set(prev + curr))) stack.append(merged) else: prev = stack.pop() result.append(prev) stack.append(curr) result", "\"\"\" Utility function for subdivide Some rows headers actually apply to subsequent rows.", "i in inds] return [' '.join(_unique_sorted([str(k) for k in j])).strip() for j in", "in range(table.ncol)] # get numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns) numerical_columns = [] for col", "return False else: return (s_count + 1) / (v_count + s_count + 1)", "check if the number 
of strings is more than 2/3s of the entire", "= [] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i in ind_range_to_merge]", "case, the strong 'Sex' is pre-pended to 'Male' and 'Female' to get: Sex", "to 'Male' and 'Female' to get: Sex - Male 50 30 Sex -", "ranges. Example [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge =", "row_header_columns, row_flag=False) # edge case if there are no column header rows if", "0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic for whether a", "-> combine them - Figure out which of the leftmost columns are row", "== 1 empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) > 0: if", "excluding rows with column headers first_non_header_row_ind = _get_and_increment_last(column_header_rows) row_header_columns = [] for i", "for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i in ind_range_to_merge] merged_cols =", "pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\" - Categorize each cell as string, value,", "the lens match if len(numerical_subtable) > 1 and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable", "/ 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for subdivide Function that combines overlapping", "all_rows_flag or _row_or_col_is_header(s_count, v_count): column_header_rows.append(i) else: break # as soon as this is", "seen_add = seen.add return [x for x in seq if not (x in", "if len(numerical_subtable) > 1 and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable)", "def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide Some rows headers actually apply", "not identical numerical_columns_merged = [] column_headers_merged = [] for ind_range_to_merge in ind_ranges_to_merge: subcols", "each cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i in range(table.nrow): for j", "> 1 and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged,", "cell as string, value, or empty - Figure out which of the top", "of strings is more than 2/3s of the entire row s_count = np.sum(table_categories[i,", "row_headers[append_row] row_headers = [row_headers[i] for i in non_empty_rows] numerical_subtable = numerical_subtable[non_empty_rows] return row_headers,", "== 'E').mean(0)[first_non_header_col_ind:] empty_col_inds = np.where(empty_cols > 0.9)[0] ind_ranges_to_merge = [[i - 1, i]", "range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure out how many of the", "numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns) numerical_columns = [] for col in range(first_non_header_col_ind, table.ncol): #", "_append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility function", "if the number of strings is more than 2/3s of the entire row", "leftmost columns are row headers -> combine them - Put the remaining subtable", "of the top rows are column headers -> combine them - Figure 
out", "the remaining subtable into a numpy array TODO: Common problem: \"n (%)\" columns", "30 Female 30 20 For this case, the strong 'Sex' is pre-pended to", "in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S') v_count = np.sum(table_categories[first_non_header_row_ind:, i]", "for subdivide Function that combines overlapping integer ranges. Example [[1,2,3], [2,3], [3], [4,5],", "isn't the header col = [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) #", "[5]] -> [[1,2,3], [4,5]] \"\"\" ind_ranges_to_merge = sorted(ind_ranges_to_merge) stack = [] result =", "/ (v_count + s_count + 1) >= 2. / 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\"", "ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge) # perform the merge # note: only merge the cell", "if len(l) > 0: return l[-1] + 1 else: return 0 def _row_or_col_is_header(s_count,", "stack[-1][-1] >= curr[0]: prev = stack.pop() merged = sorted(list(set(prev + curr))) stack.append(merged) else:", "E.g.: Sex np.nan np.nan Male 50 30 Female 30 20 For this case,", "row_headers = [row_headers[i] for i in non_empty_rows] numerical_subtable = numerical_subtable[non_empty_rows] return row_headers, numerical_subtable", "set() seen_add = seen.add return [x for x in seq if not (x", "# note: only merge the cell contents if they are not identical numerical_columns_merged", "i] == 'S') v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V') # TODO: maybe have", "j]) # figure out how many of the top rows are column headers", "'V') if all_rows_flag or _row_or_col_is_header(s_count, v_count): column_header_rows.append(i) else: break # as soon as", "numerical_columns.append(col) # we only care about the rows/columns that span the numerical subtable", "groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i += len(g) # combine", "rows? # figure out how many of the leftmost columns are row headers", "l[-1] + 1 else: return 0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for", "row's header can be appended to all the subsequent row headers # until", "list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i += len(g) # combine overlapping merging index", "(table_categories == 'E').mean(0)[first_non_header_col_ind:] empty_col_inds = np.where(empty_cols > 0.9)[0] ind_ranges_to_merge = [[i - 1,", "are column headers -> combine them - Figure out which of the leftmost", "find other columns that are not contiguous with the top columns? # get", "headers i = 0 for k, g in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i,", "numpy as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\" - Categorize each", "also sometimes there are no row headers, so we have to ensure the", "_combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility function for subdivide \"\"\" if row_flag: row_or_col_list =", "'.join(_unique_sorted([str(k) for k in j])).strip() for j in zip(*row_or_col_list)] def _get_and_increment_last(l): \"\"\" Utility", "subdivide Some rows headers actually apply to subsequent rows. E.g.: Sex np.nan np.nan", "TODO: Common problem: \"n (%)\" columns are often split up by Omnipage! 
If", "merge # note: only merge the cell contents if they are not identical", "else: return 0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic for", "+ str(i) for i in range(table.ncol)] # get numerical_subtable first_non_header_col_ind = _get_and_increment_last(row_header_columns) numerical_columns", "problem: \"n (%)\" columns are often split up by Omnipage! If two adjacent", "categorize_cell_string(table[i, j]) # figure out how many of the top rows are column", "(x in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function for subdivide", "j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if rows of the", "3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for subdivide Function that combines overlapping integer", "row_flag=False) # edge case if there are no column header rows if len(column_headers)", "of the entire row s_count = np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i,", "different condition because we have cut out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i)", "if all_rows_flag or _row_or_col_is_header(s_count, v_count): column_header_rows.append(i) else: break # as soon as this", "row_header_columns.append(i) else: break # TODO: maybe find other columns that are not contiguous", "they should be concatenated \"\"\" # first, categorize each cell table_categories = np.zeros((table.nrow,", "_combine_omnipage_cell_list(table, column_header_rows, row_flag=True) row_headers = _combine_omnipage_cell_list(table, row_header_columns, row_flag=False) # edge case if there", "[' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if", "np.nan Male 50 30 Female 30 20 For this case, the strong 'Sex'", "numerical_columns_merged = [] column_headers_merged = [] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i]", "ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i in ind_range_to_merge] merged_cols = ['", "next empty set of rows # also sometimes there are no row headers,", "a header or not. \"\"\" if s_count == 1 and v_count == 1:", "but preserves order \"\"\" seen = set() seen_add = seen.add return [x for", "Common problem: \"n (%)\" columns are often split up by Omnipage! 
If two", "= np.sum(table_categories[first_non_header_row_ind:, i] == 'V') # TODO: maybe have a different condition because", "numerical_subtable) return column_headers_merged, row_headers, numerical_subtable def _combine_omnipage_cell_list(table, inds, row_flag): \"\"\" Utility function for", "\"\"\" if s_count == 1 and v_count == 1: return False else: return", "combine them - Put the remaining subtable into a numpy array TODO: Common", "[] for i in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S') v_count", "are no row headers, so we have to ensure the lens match if", "= [] for i in range(0, table.nrow): # sometimes the caption gets lobbed", "v_count): row_header_columns.append(i) else: break # TODO: maybe find other columns that are not", "len(l) > 0: return l[-1] + 1 else: return 0 def _row_or_col_is_header(s_count, v_count):", "stack.append(curr) elif stack[-1][-1] >= curr[0]: prev = stack.pop() merged = sorted(list(set(prev + curr)))", "return 0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic for whether", "and v_count == 1: return False else: return (s_count + 1) / (v_count", "the caption gets lobbed into the first column # and splayed across many", "we have to ensure the lens match if len(numerical_subtable) > 1 and len(numerical_subtable)", "for j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure out how", "0.9)[0] ind_ranges_to_merge = [[i - 1, i] for i in empty_col_inds if i", "about the rows/columns that span the numerical subtable column_headers = column_headers[first_non_header_col_ind:] row_headers =", "and '%'/'(%)' respectively, then they should be concatenated \"\"\" # first, categorize each", "value, or empty - Figure out which of the top rows are column", "gets lobbed into the first column # and splayed across many rows. 
detect", "\"\"\" Utility function for subdivide Heuristic for whether a row/col is a header", "== '').mean(1) == 1 empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows) >", "+ ' - ' + row_headers[append_row] row_headers = [row_headers[i] for i in non_empty_rows]", "\"\"\" if row_flag: row_or_col_list = [table[i, :] for i in inds] else: row_or_col_list", "+ row_headers[append_row] row_headers = [row_headers[i] for i in non_empty_rows] numerical_subtable = numerical_subtable[non_empty_rows] return", "+ 1 else: return 0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide", "1 else: return 0 def _row_or_col_is_header(s_count, v_count): \"\"\" Utility function for subdivide Heuristic", "column_headers_merged = [] for ind_range_to_merge in ind_ranges_to_merge: subcols = [numerical_columns[i] for i in", "column_header_rows.append(i) else: break # as soon as this is false, we quit #", "import groupby import numpy as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\"", "column_headers[first_non_header_col_ind:] row_headers = row_headers[first_non_header_row_ind:] # merge columns to previous one if the column", "= sorted(list(set(prev + curr))) stack.append(merged) else: prev = stack.pop() result.append(prev) stack.append(curr) result +=", "empty - Figure out which of the top rows are column headers ->", "> 0.9)[0] ind_ranges_to_merge = [[i - 1, i] for i in empty_col_inds if", "30 20 \"\"\" empty_flag = (numerical_subtable == '').mean(1) == 1 empty_rows = list(np.where(empty_flag)[0])", "into the first column # and splayed across many rows. detect that here:", "1 and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers,", "table.ncol) # check if the number of strings is more than 2/3s of", "no row headers, so we have to ensure the lens match if len(numerical_subtable)", "headers # excluding rows with column headers first_non_header_row_ind = _get_and_increment_last(column_header_rows) row_header_columns = []", "column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if rows of the numerical subtable are all", "<reponame>allenai/pubmedextract<filename>pubmedextract/sex_utils/subdivide_table.py from itertools import groupby import numpy as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string", "== 0: stack.append(curr) elif stack[-1][-1] >= curr[0]: prev = stack.pop() merged = sorted(list(set(prev", "if not (x in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable): \"\"\" Utility function", "len(column_headers) == 0: column_headers = ['col_' + str(i) for i in range(table.ncol)] #", "== 1 and v_count == 1: return False else: return (s_count + 1)", "zip(empty_rows, all_append_rows): for append_row in append_rows: row_headers[append_row] = row_headers[i] + ' - '", "often split up by Omnipage! 
If two adjacent columns have column headers that", "subtable column_headers = column_headers[first_non_header_col_ind:] row_headers = row_headers[first_non_header_row_ind:] # merge columns to previous one", "s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S') v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V') #", "col]][first_non_header_row_ind:] numerical_columns.append(col) # we only care about the rows/columns that span the numerical", "Male 50 30 Female 30 20 For this case, the strong 'Sex' is", "if empty_rows[-1] != len(row_headers): empty_rows.append(len(row_headers)) all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1]))", "combine them - Figure out which of the leftmost columns are row headers", "# edge case if there are no column header rows if len(column_headers) ==", "prev = stack.pop() result.append(prev) stack.append(curr) result += stack return result def _unique_sorted(seq): \"\"\"", "in range(len(empty_rows) - 1)] for i, append_rows in zip(empty_rows, all_append_rows): for append_row in", "with the top columns? # get headers column_headers = _combine_omnipage_cell_list(table, column_header_rows, row_flag=True) row_headers", "lens match if len(numerical_subtable) > 1 and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable =", "as np from pubmedextract.sex_utils.regex_utils import categorize_cell_string def subdivide(table): \"\"\" - Categorize each cell", "# as soon as this is false, we quit # TODO: maybe find", "in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i + len(g)))) i += len(g) #", "merge columns if they have the same headers i = 0 for k,", "i = 0 for k, g in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i", "= np.where(~empty_flag)[0] if len(empty_rows) > 0: if empty_rows[-1] != len(row_headers): empty_rows.append(len(row_headers)) all_append_rows =", "-> combine them - Put the remaining subtable into a numpy array TODO:", "break # TODO: maybe find other columns that are not contiguous with the", "only merge the cell contents if they are not identical numerical_columns_merged = []", "_get_and_increment_last(l): \"\"\" Utility function for subdivide \"\"\" if len(l) > 0: return l[-1]", "soon as this is false, we quit # TODO: maybe find other rows", "0 for k, g in groupby(column_headers): g = list(g) ind_ranges_to_merge.append(list(range(i, i + len(g))))", "the entire row s_count = np.sum(table_categories[i, :] == 'S') v_count = np.sum(table_categories[i, :]", "+ 1) >= 2. / 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for subdivide", "out how many of the leftmost columns are row headers # excluding rows", "other columns that are not contiguous with the top columns? # get headers", "# until the next empty set of rows # also sometimes there are", "concatenated \"\"\" # first, categorize each cell table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for", "if they have the same headers i = 0 for k, g in", "contiguous with the top columns? 
# get headers column_headers = _combine_omnipage_cell_list(table, column_header_rows, row_flag=True)", "in append_rows: row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row] row_headers =", "the header col = [str(i) for i in table[:, col]][first_non_header_row_ind:] numerical_columns.append(col) # we", "column is mostly empty empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:] empty_col_inds = np.where(empty_cols >", "= _get_and_increment_last(column_header_rows) row_header_columns = [] for i in range(0, table.ncol): s_count = np.sum(table_categories[first_non_header_row_ind:,", "condition because we have cut out some rows if _row_or_col_is_header(s_count, v_count): row_header_columns.append(i) else:", "for j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable = np.array(numerical_columns_merged).T # if rows of", "all empty # then this row's header can be appended to all the", "Female 30 20 \"\"\" empty_flag = (numerical_subtable == '').mean(1) == 1 empty_rows =", "import categorize_cell_string def subdivide(table): \"\"\" - Categorize each cell as string, value, or", "subcols = [numerical_columns[i] for i in ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j", "for whether a row/col is a header or not. \"\"\" if s_count ==", "extract the part of the column that isn't the header col = [str(i)", "np.zeros((table.nrow, table.ncol), dtype=np.unicode_) for i in range(table.nrow): for j in range(table.ncol): table_categories[i, j]", "the same headers i = 0 for k, g in groupby(column_headers): g =", "find other rows that are not contiguous with the top rows? # figure", "i in range(len(empty_rows) - 1)] for i, append_rows in zip(empty_rows, all_append_rows): for append_row", "for subdivide Keeps unique values but preserves order \"\"\" seen = set() seen_add", "\"\"\" Utility function for subdivide \"\"\" if row_flag: row_or_col_list = [table[i, :] for", "v_count == 1: return False else: return (s_count + 1) / (v_count +", ">= 2. / 3. def _combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for subdivide Function that", "table.nrow): # sometimes the caption gets lobbed into the first column # and", "(numerical_subtable == '').mean(1) == 1 empty_rows = list(np.where(empty_flag)[0]) non_empty_rows = np.where(~empty_flag)[0] if len(empty_rows)", "0: if empty_rows[-1] != len(row_headers): empty_rows.append(len(row_headers)) all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i +", "and len(numerical_subtable) == len(row_headers): row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable) return column_headers_merged, row_headers, numerical_subtable", "_combine_ind_ranges(ind_ranges_to_merge): \"\"\" Utility function for subdivide Function that combines overlapping integer ranges. Example", "Some rows headers actually apply to subsequent rows. E.g.: Sex np.nan np.nan Male", "'S') v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V') # TODO: maybe have a different", "1: return False else: return (s_count + 1) / (v_count + s_count +", "column_headers = ['col_' + str(i) for i in range(table.ncol)] # get numerical_subtable first_non_header_col_ind", "have column headers that end with 'n' and '%'/'(%)' respectively, then they should", "are often split up by Omnipage! 
If two adjacent columns have column headers", "of the column that isn't the header col = [str(i) for i in", "col in range(first_non_header_col_ind, table.ncol): # extract the part of the column that isn't", "for append_row in append_rows: row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row]", "2/3s of the entire row s_count = np.sum(table_categories[i, :] == 'S') v_count =", "j in range(table.ncol): table_categories[i, j] = categorize_cell_string(table[i, j]) # figure out how many", "= seen.add return [x for x in seq if not (x in seen", "Utility function for subdivide \"\"\" if row_flag: row_or_col_list = [table[i, :] for i", "for x in seq if not (x in seen or seen_add(x))] def _append_row_header_to_subsequent_rows(row_headers,", "table.ncol): # extract the part of the column that isn't the header col", "are not contiguous with the top rows? # figure out how many of", "> 0] # merge columns if they have the same headers i =", "subdivide Heuristic for whether a row/col is a header or not. \"\"\" if", "ind_range_to_merge] merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)] numerical_columns_merged.append(merged_cols) column_headers_merged.append(column_headers[ind_range_to_merge[0]]) numerical_subtable =", "= [table[:, i] for i in inds] return [' '.join(_unique_sorted([str(k) for k in", "# check if the number of strings is more than 2/3s of the", "headers actually apply to subsequent rows. E.g.: Sex np.nan np.nan Male 50 30", "header or not. \"\"\" if s_count == 1 and v_count == 1: return", "is pre-pended to 'Male' and 'Female' to get: Sex - Male 50 30", "rows headers actually apply to subsequent rows. E.g.: Sex np.nan np.nan Male 50", "part of the column that isn't the header col = [str(i) for i", "# merge columns if they have the same headers i = 0 for", "are not contiguous with the top columns? 
from itertools import groupby

import numpy as np

from pubmedextract.sex_utils.regex_utils import categorize_cell_string


def subdivide(table):
    """
    - Categorize each cell as string, value, or empty
    - Figure out which of the top rows are column headers -> combine them
    - Figure out which of the leftmost columns are row headers -> combine them
    - Put the remaining subtable into a numpy array

    TODO: Common problem: "n (%)" columns are often split up by Omnipage!
    If two adjacent columns have column headers that end with 'n' and '%'/'(%)'
    respectively, then they should be concatenated
    """
    # first, categorize each cell
    table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_)
    for i in range(table.nrow):
        for j in range(table.ncol):
            table_categories[i, j] = categorize_cell_string(table[i, j])

    # figure out how many of the top rows are column headers
    column_header_rows = []
    for i in range(0, table.nrow):
        # sometimes the caption gets lobbed into the first column
        # and splayed across many rows. detect that here:
        all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol)
        # check if the number of strings is more than 2/3s of the entire row
        s_count = np.sum(table_categories[i, :] == 'S')
        v_count = np.sum(table_categories[i, :] == 'V')
        if all_rows_flag or _row_or_col_is_header(s_count, v_count):
            column_header_rows.append(i)
        else:
            break  # as soon as this is false, we quit
    # TODO: maybe find other rows that are not contiguous with the top rows?

    # figure out how many of the leftmost columns are row headers
    # excluding rows with column headers
    first_non_header_row_ind = _get_and_increment_last(column_header_rows)
    row_header_columns = []
    for i in range(0, table.ncol):
        s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S')
        v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V')
        # TODO: maybe have a different condition because we have cut out some rows
        if _row_or_col_is_header(s_count, v_count):
            row_header_columns.append(i)
        else:
            break
    # TODO: maybe find other columns that are not contiguous with the top columns?

    # get headers
    column_headers = _combine_omnipage_cell_list(table, column_header_rows, row_flag=True)
    row_headers = _combine_omnipage_cell_list(table, row_header_columns, row_flag=False)

    # edge case if there are no column header rows
    if len(column_headers) == 0:
        column_headers = ['col_' + str(i) for i in range(table.ncol)]

    # get numerical_subtable
    first_non_header_col_ind = _get_and_increment_last(row_header_columns)
    numerical_columns = []
    for col in range(first_non_header_col_ind, table.ncol):
        # extract the part of the column that isn't the header
        col = [str(i) for i in table[:, col]][first_non_header_row_ind:]
        numerical_columns.append(col)

    # we only care about the rows/columns that span the numerical subtable
    column_headers = column_headers[first_non_header_col_ind:]
    row_headers = row_headers[first_non_header_row_ind:]

    # merge columns to previous one if the column is mostly empty
    empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:]
    empty_col_inds = np.where(empty_cols > 0.9)[0]
    ind_ranges_to_merge = [[i - 1, i] for i in empty_col_inds if i > 0]

    # merge columns if they have the same headers
    i = 0
    for k, g in groupby(column_headers):
        g = list(g)
        ind_ranges_to_merge.append(list(range(i, i + len(g))))
        i += len(g)

    # combine overlapping merging index ranges
    ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge)

    # perform the merge
    # note: only merge the cell contents if they are not identical
    numerical_columns_merged = []
    column_headers_merged = []
    for ind_range_to_merge in ind_ranges_to_merge:
        subcols = [numerical_columns[i] for i in ind_range_to_merge]
        merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)]
        numerical_columns_merged.append(merged_cols)
        column_headers_merged.append(column_headers[ind_range_to_merge[0]])
    numerical_subtable = np.array(numerical_columns_merged).T

    # if rows of the numerical subtable are all empty
    # then this row's header can be appended to all the subsequent row headers
    # until the next empty set of rows
    # also sometimes there are no row headers, so we have to ensure the lens match
    if len(numerical_subtable) > 1 and len(numerical_subtable) == len(row_headers):
        row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable)

    return column_headers_merged, row_headers, numerical_subtable


def _combine_omnipage_cell_list(table, inds, row_flag):
    """
    Utility function for subdivide
    """
    if row_flag:
        row_or_col_list = [table[i, :] for i in inds]
    else:
        row_or_col_list = [table[:, i] for i in inds]
    return [' '.join(_unique_sorted([str(k) for k in j])).strip() for j in zip(*row_or_col_list)]


def _get_and_increment_last(l):
    """
    Utility function for subdivide
    """
    if len(l) > 0:
        return l[-1] + 1
    else:
        return 0


def _row_or_col_is_header(s_count, v_count):
    """
    Utility function for subdivide
    Heuristic for whether a row/col is a header or not.
    """
    if s_count == 1 and v_count == 1:
        return False
    else:
        return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.


def _combine_ind_ranges(ind_ranges_to_merge):
    """
    Utility function for subdivide
    Function that combines overlapping integer ranges.
    Example: [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]]
    """
    ind_ranges_to_merge = sorted(ind_ranges_to_merge)
    stack = []
    result = []
    for curr in ind_ranges_to_merge:
        if len(stack) == 0:
            stack.append(curr)
        elif stack[-1][-1] >= curr[0]:
            prev = stack.pop()
            merged = sorted(list(set(prev + curr)))
            stack.append(merged)
        else:
            prev = stack.pop()
            result.append(prev)
            stack.append(curr)
    result += stack
    return result


def _unique_sorted(seq):
    """
    Utility function for subdivide
    Keeps unique values but preserves order
    """
    seen = set()
    seen_add = seen.add
    return [x for x in seq if not (x in seen or seen_add(x))]


def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable):
    """
    Utility function for subdivide
    Some row headers actually apply to subsequent rows. E.g.:

    Sex     np.nan  np.nan
    Male    50      30
    Female  30      20

    For this case, the string 'Sex' is pre-pended to 'Male' and 'Female' to get:

    Sex - Male      50  30
    Sex - Female    30  20
    """
    empty_flag = (numerical_subtable == '').mean(1) == 1
    empty_rows = list(np.where(empty_flag)[0])
    non_empty_rows = np.where(~empty_flag)[0]
    if len(empty_rows) > 0:
        if empty_rows[-1] != len(row_headers):
            empty_rows.append(len(row_headers))
        all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows) - 1)]
        for i, append_rows in zip(empty_rows, all_append_rows):
            for append_row in append_rows:
                row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row]
        row_headers = [row_headers[i] for i in non_empty_rows]
        numerical_subtable = numerical_subtable[non_empty_rows]
    return row_headers, numerical_subtable
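# ---------------------------------------------------------------------------
# The block below is not part of the original module; it is a minimal,
# hypothetical sketch that exercises the pure helper functions above
# (_combine_ind_ranges, _unique_sorted, _get_and_increment_last and
# _row_or_col_is_header) so their behaviour can be checked without an
# Omnipage table object. The expected values mirror the docstring examples.
if __name__ == '__main__':
    # overlapping index ranges collapse into maximal ranges
    assert _combine_ind_ranges([[1, 2, 3], [2, 3], [3], [4, 5], [5]]) == [[1, 2, 3], [4, 5]]
    # duplicates are dropped while the original order is preserved
    assert _unique_sorted(['n', '(%)', 'n', '12', '(%)']) == ['n', '(%)', '12']
    # the first index after the last header row/column, or 0 if there are none
    assert _get_and_increment_last([0, 1]) == 2 and _get_and_increment_last([]) == 0
    # a row of 9 strings and 2 values is treated as a header row
    assert _row_or_col_is_header(s_count=9, v_count=2) is True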
[ "you like, # 2 per up/down/left/right?) But if you get one correct likely", "1 mark for getting only one condition correct # 2 marks for getting", "if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over' for i in range(len(mat)): #check for", "len(directions) - 1)] newMat = deepcopy(mat) gameScore = initialScore while game_state(newMat)!='lose': try: newMat,", "range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score", "maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + +", "currentValue = calculatedValue return currentValue else: number = 0 sum_value = 0 emptyCells", "depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue = -1 d =", "value. \"\"\" mat[row][col] = value return mat def move(game, direction): if(direction==\"up\"): return up(game)", "monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048,", "# 3 marks for correct checking def game_state(mat): for i in range(len(mat)): for", "minimize(c, depth - 1)) except IndexError: continue return maxUtility def minimize(mat, depth): if", "of the three conditions # 3 marks for correct checking def game_state(mat): for", "problem set. from random import * from copy import deepcopy import math import", "0: return expectimax(mat, depth-1, True) return (sum_value/number) def set_tile(mat, row, col, value): \"\"\"", "\"\"\" emptySquareList = [] for row in range(len(mat)): for col in range(len(mat[0])): if", "= max(maxUtility, alpha) if alpha >= beta: break return maxUtility def alphaBeta(grid, max,", "correct loop def new_tile(mat): seq = [2] * 90 + [4] newTile =", "up/down entries on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return 'lose' ###########", "newBoard = deepcopy(mat) newBoard, done, score = move(newBoard, direction) calculatedValue = expectimax(newBoard, depth", "is created on a zero entry # 1 mark for creating the correct", "+ abs(y - 0)) * max_tile) else: dis = -((abs(x - 0) +", "j in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3 # ########## #", "col]) return emptySquareList def getMaxTile(mat): maxTile = 0 for x in range(len(mat)): for", "if minUtility <= alpha: break beta = min(minUtility, beta) # print minUtility return", "beta) # print minUtility return minUtility def montecarlo(mat, initialScore): scores = [] for", "return mat def move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game) #", "deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: minUtility", "commented out to allow us to run your # code easily while grading", "up(game) elif direction==\"down\": return down(game) # down(game) elif direction == \"left\": return left(game)", "* from copy import deepcopy import math import random ####### #Task 1a# #######", "copy import deepcopy import math import random ####### #Task 1a# ####### # [Marking", "+ number_of_empty_cells*2 + + getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048, 1024,", "def minimax(grid, max, startDepth): if max: return maximize(grid, startDepth) else: return minimize(grid, startDepth)", "temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def left(game): # print(\"left\") # return matrix", "by 
flipping the matrix around # No idea how to grade this one", "print minUtility return minUtility def a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth", "be able to solve the entire thing just by flipping the matrix around", "pegged to 8 (which gives you like, # 2 per up/down/left/right?) But if", "wrong result. def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for j", "- 0) + abs(y - 0)) * (max_tile / 2)) break return dis", "elif direction == \"left\": return left(game) elif direction==\"right\": return right(game) def up(game): #", "getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048, 1024, 256, 64], [1024, 256,", "def new_game(n): matrix = [] for i in range(n): matrix.append([0] * n) return", "depth == 0: return heuristic_score(mat) maxUtility = -float('inf') d = ['up', 'down', 'left',", "import * from copy import deepcopy import math import random ####### #Task 1a#", "done=done or temp[1] score = temp[2] game=cover_up(game)[0] game=transpose(game) return (game,done, score) def down(game):", "return maxUtility def alphaBeta(grid, max, startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth)", "def maximize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility =", "minUtility def montecarlo(mat, initialScore): scores = [] for i in range(0, 100): directions", "[] for i in range(n): matrix.append([0] * n) return matrix ########### # Task", "score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score) def empty_cells(mat):", "-> merge -> compress again # Basically if they can solve one side,", "direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat,", "return 'not over' return 'lose' ########### # Task 2a # ########### # [Marking", "grid[row][column] * grid_mask[row][column] return monotonicity_score def distance(mat, max_tile): dis = None for x", "game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility = -float('inf') d = ['up',", "for row in range(3): for column in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column]", "game=transpose(game) return (game,done, score) def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score", "completely wrong solutions # 1 mark for getting only one condition correct #", "= move(c, direction) if done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 ))", "scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer:", "y in range(len(mat)): if max_tile == mat[x][y]: if max_tile < 1024: dis =", "0: return heuristic_score(mat) maxUtility = -float('inf') d = ['up', 'down', 'left', 'right'] for", "correct likely to get all correct so... # Check the down one. 
Reverse/transpose", "that work for all sizes of matrices def reverse(mat): new=[] for i in", "return (game,done, score) def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score =", "d = ['up', 'down', 'left', 'right'] for direction in d: newBoard = deepcopy(mat)", "getMaxTile(mat): maxTile = 0 for x in range(len(mat)): for y in range(len(mat[x])): maxTile", "== 0: return expectimax(mat, depth-1, True) return (sum_value/number) def set_tile(mat, row, col, value):", "range(len(mat)): if max_tile == mat[x][y]: if max_tile < 1024: dis = -((abs(x -", "[] for row in range(len(mat)): for col in range(len(mat[0])): if mat[row][col] == 0:", "value): \"\"\" Set the tile at position row, col to have the given", "elements must be equal but not identical # 1 mark for creating the", "child in children: minUtility = min(minUtility, maximize(child, depth - 1)) # print minUtility", "currentValue else: number = 0 sum_value = 0 emptyCells = empty_cells(mat) children =", "in range(n): matrix.append([0] * n) return matrix ########### # Task 1b # ###########", "game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def", "0 for x in range(len(mat)): for y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y])", "\"\"\" Set the tile at position row, col to have the given value.", "i in range(len(mat)): new.append([]) for j in range(len(mat[0])): new[i].append(mat[i][len(mat[0])-j-1]) return new ########### #", "if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game) # down(game) elif direction == \"left\":", "else: dis = -((abs(x - 0) + abs(y - 0)) * (max_tile /", "be equal but not identical # 1 mark for creating the correct matrix", "mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally reduced to check the row", "range(4): for j in range(3): if mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score += mat[i][j] *", "mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat): maxTile = 0 for", "temp[2] done=done or temp[1] game=cover_up(game)[0] return (game,done, score) def right(game): # print(\"right\") #", "def montecarlo(mat, initialScore): scores = [] for i in range(0, 100): directions =", "mat[i][j]!=0: score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score) def", "on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return 'lose' ########### # Task", "1 mark for solutions that show general understanding # 2 marks for correct", "a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max,", "score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def left(game):", "at position row, col to have the given value. 
\"\"\" mat[row][col] = value", "a_maximize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility", "+ [4] newTile = choice(seq) emptySquareList = empty_cells(mat) emptySquare = choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] =", "a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility", "created on a zero entry # 1 mark for creating the correct loop", "depth == 0: return heuristic_score(mat) minUtility = float('inf') emptyCells = empty_cells(mat) children =", "newBoard, done, score = move(newBoard, direction) calculatedValue = expectimax(newBoard, depth - 1, False)", "game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done,", "# ########### # [Marking Scheme] # Points to note: # 0 marks for", "Matrix elements must be equal but not identical # 1 mark for creating", "reverse(mat): new=[] for i in range(len(mat)): new.append([]) for j in range(len(mat[0])): new[i].append(mat[i][len(mat[0])-j-1]) return", "child in children: minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if", "us to run your # code easily while grading your problem set. from", "i in range(0, 100): directions = ['up', 'down', 'left', 'right'] direction = directions[random.randint(0,", "marks for correct checking def game_state(mat): for i in range(len(mat)): for j in", "Scheme] # Points to note: # 0 marks for completely incorrect solutions #", "done=True return (mat,done, score) def empty_cells(mat): \"\"\" Return a list of empty cells.", "in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score def distance(mat, max_tile): dis", "condition correct # 2 marks for getting two of the three conditions #", "if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return 'lose' ########### # Task 2a # ###########", "IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat)", "number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat) return score", "range(len(mat)): for col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList", "try: c, done = move(c, direction) if done: maxUtility = max(maxUtility, minimize(c, depth", "up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done or temp[1] score = temp[2] game=cover_up(game)[0] game=transpose(game)", "range(n): matrix.append([0] * n) return matrix ########### # Task 1b # ########### #", "def alphaBeta(grid, max, startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return", "4) children.append(gridCopy) for child in children: minUtility = min(minUtility, maximize(child, depth - 1))", "random ####### #Task 1a# ####### # [Marking Scheme] # Points to note: #", "score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat) return score def monotonicity(grid): grid_mask", "sum_value+= expectimax(child, depth-1, True) number+=1 if number == 0: return expectimax(mat, depth-1, True)", "to note: # 0 marks for completely incorrect solutions # 1 mark for", "maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue = -1 d = ['up',", "left(game): # print(\"left\") # return matrix 
after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score", "1)] newMat = deepcopy(mat) gameScore = initialScore while game_state(newMat)!='lose': try: newMat, done, score", "the given value. \"\"\" mat[row][col] = value return mat def move(game, direction): if(direction==\"up\"):", "col, value): \"\"\" Set the tile at position row, col to have the", "up(game): # print(\"up\") # return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0]", "I have it pegged to 8 (which gives you like, # 2 per", "def empty_cells(mat): \"\"\" Return a list of empty cells. \"\"\" emptySquareList = []", "for i in range(4): for j in range(3): if mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score", "try: c, done = move(c, direction) if done: maxUtility = max(maxUtility, a_minimize(c, alpha,", "choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] = newTile return mat ########### # Task 1c # ########### #", "mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not over' for j in range(len(mat)-1): #check up/down entries on last", "else: return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth == 0:", "monotonicity_score def distance(mat, max_tile): dis = None for x in range(len(mat)): if dis:", "c[1], 4) children.append(gridCopy) for child in children: minUtility = min(minUtility, maximize(child, depth -", "loop def new_tile(mat): seq = [2] * 90 + [4] newTile = choice(seq)", "c in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 2) children.append(gridCopy)", "64], [1024, 256, 64, 16], [256, 64, 16, 4], [64, 16, 4, 1]]", "1 mark for creating the correct matrix def new_game(n): matrix = [] for", "grid_mask = [[2048, 1024, 256, 64], [1024, 256, 64, 16], [256, 64, 16,", "= 0 for row in range(3): for column in range(3): monotonicity_score += grid[row][column]", "done = move(c, direction) if done: maxUtility = max(maxUtility, minimize(c, depth - 1))", "['up', 'down', 'left', 'right'] for direction in d: c = deepcopy(mat) try: c,", "= move(c, direction) if done: maxUtility = max(maxUtility, minimize(c, depth - 1)) except", "256, 64], [1024, 256, 64, 16], [256, 64, 16, 4], [64, 16, 4,", "= set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: minUtility = min(minUtility,", "min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if minUtility <= alpha: break beta", "entry # 1 mark for creating the correct loop def new_tile(mat): seq =", "+= mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score) def empty_cells(mat): \"\"\"", "game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def left(game): # print(\"left\") # return matrix after", "children: minUtility = min(minUtility, maximize(child, depth - 1)) # print minUtility return minUtility", "for i in range(0, 100): directions = ['up', 'down', 'left', 'right'] direction =", "if you get one correct likely to get all correct so... 
# Check", "number+=1 if number == 0: return expectimax(mat, depth-1, True) return (sum_value/number) def set_tile(mat,", "def set_tile(mat, row, col, value): \"\"\" Set the tile at position row, col", "the correct matrix def new_game(n): matrix = [] for i in range(n): matrix.append([0]", "for child in children: minUtility = min(minUtility, maximize(child, depth - 1)) # print", "in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3 # ########## # [Marking", "def a_maximize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat)", "done = move(c, direction) if done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1", "print(\"right\") # return matrix after shifting right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score =", "minimax(grid, max, startDepth): if max: return maximize(grid, startDepth) else: return minimize(grid, startDepth) def", "print(\"error-----------------------------------------------------------------------------\") continue alpha = max(maxUtility, alpha) if alpha >= beta: break return maxUtility", "def getMaxTile(mat): maxTile = 0 for x in range(len(mat)): for y in range(len(mat[x])):", "it is created on a zero entry # 1 mark for creating the", "# 1 mark for creating the correct loop def new_tile(mat): seq = [2]", "can solve one side, and use transpose and reverse correctly they should #", "max, startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return a_minimize(grid, -float('inf'),", "2) children.append(gridCopy) gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for", "column in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score def distance(mat, max_tile):", "or temp[1] game=cover_up(game)[0] return (game,done, score) def right(game): # print(\"right\") # return matrix", "in range(len(mat)): if max_tile == mat[x][y]: if max_tile < 1024: dis = -((abs(x", "continue return maxUtility def minimize(mat, depth): if game_state(mat)=='lose' or depth == 0: return", "children: sum_value+= expectimax(child, depth-1, True) number+=1 if number == 0: return expectimax(mat, depth-1,", "minUtility def a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return", "1b # ########### # [Marking Scheme] # Points to note: # Must ensure", "max(maxUtility, minimize(c, depth - 1)) except IndexError: continue return maxUtility def minimize(mat, depth):", "range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat): maxTile =", "children.append(gridCopy) for child in children: minUtility = min(minUtility, a_maximize(child, alpha, beta, depth -", "range(len(mat)-1): #to check the left/right entries on the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return", "return score def monotonicity(grid): grid_mask = [[2048, 1024, 256, 64], [1024, 256, 64,", "Mission N Solutions # # Note that written answers are commented out to", "row on the right and below for j in range(len(mat[0])-1): #more elegant to", "grade this one at the moment. 
I have it pegged to 8 (which", "range(0, 100): directions = ['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) -", "while game_state(newMat)!='lose': try: newMat, done, score = move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat)", "1 mark for creating the correct loop def new_tile(mat): seq = [2] *", "== mat[x][y]: if max_tile < 1024: dis = -((abs(x - 0) + abs(y", "'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat) gameScore", "depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha = max(maxUtility, alpha) if alpha >=", "= new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer):", "have the given value. \"\"\" mat[row][col] = value return mat def move(game, direction):", "y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells =", "- 1)) if minUtility <= alpha: break beta = min(minUtility, beta) # print", "range(len(mat)-1): #intentionally reduced to check the row on the right and below for", "# 2 marks for correct solutions that work for all sizes of matrices", "so... # Check the down one. Reverse/transpose if ordered wrongly will give you", "0)) * max_tile) else: dis = -((abs(x - 0) + abs(y - 0))", "after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0]", "j in range(len(mat[0])): if mat[i][j]==0: return 'not over' for k in range(len(mat)-1): #to", "def distance(mat, max_tile): dis = None for x in range(len(mat)): if dis: break", "done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue", "work for all sizes of matrices def transpose(mat): new=[] for i in range(len(mat[0])):", "for completely wrong solutions # 1 mark for getting only one condition correct", "direction) if done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError:", "direction) if done: maxUtility = max(maxUtility, minimize(c, depth - 1)) except IndexError: continue", "alpha: break beta = min(minUtility, beta) # print minUtility return minUtility def montecarlo(mat,", "if mat[i][j]==0: return 'not over' for k in range(len(mat)-1): #to check the left/right", "Scheme] # Points to note: # The way to do movement is compress", "temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def left(game): # print(\"left\")", "below for j in range(len(mat[0])-1): #more elegant to use exceptions but most likely", "note: # Matrix elements must be equal but not identical # 0 marks", "mat[row][col] = value return mat def move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\":", "'left', 'right'] for direction in d: newBoard = deepcopy(mat) newBoard, done, score =", "1, False) if calculatedValue > currentValue: currentValue = calculatedValue return currentValue else: number", "and mat[i][j]!=0: score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score)", "def minimize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility =", "side, and use transpose 
#
# CS1010FC --- Programming Methodology
#
# Mission N Solutions
#
# Note that written answers are commented out to allow us to run your
# code easily while grading your problem set.

from random import *
from copy import deepcopy
import math
import random

#######
#Task 1a#
#######

# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 1 mark for creating the correct matrix

def new_game(n):
    matrix = []
    for i in range(n):
        matrix.append([0] * n)
    return matrix

###########
# Task 1b #
###########

# [Marking Scheme]
# Points to note:
# Must ensure that it is created on a zero entry
# 1 mark for creating the correct loop

def new_tile(mat):
    seq = [2] * 90 + [4]
    newTile = choice(seq)
    emptySquareList = empty_cells(mat)
    emptySquare = choice(emptySquareList)
    mat[emptySquare[0]][emptySquare[1]] = newTile
    return mat
###########
# Task 1c #
###########

# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 0 marks for completely wrong solutions
# 1 mark for getting only one condition correct
# 2 marks for getting two of the three conditions
# 3 marks for correct checking

def game_state(mat):
    for i in range(len(mat)):
        for j in range(len(mat[0])):
            if mat[i][j] == 2048:
                return 'win'
    for i in range(len(mat)-1):  # intentionally reduced to check the row on the right and below
        for j in range(len(mat[0])-1):  # more elegant to use exceptions but most likely this will be their solution
            if mat[i][j] == mat[i+1][j] or mat[i][j+1] == mat[i][j]:
                return 'not over'
    for i in range(len(mat)):  # check for any zero entries
        for j in range(len(mat[0])):
            if mat[i][j] == 0:
                return 'not over'
    for k in range(len(mat)-1):  # to check the left/right entries on the last row
        if mat[len(mat)-1][k] == mat[len(mat)-1][k+1]:
            return 'not over'
    for j in range(len(mat)-1):  # check up/down entries on last column
        if mat[j][len(mat)-1] == mat[j+1][len(mat)-1]:
            return 'not over'
    return 'lose'
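# --- Illustrative sketch (assumption, added for this write-up only): what the
# three game_state outcomes look like on tiny hypothetical boards.
def _demo_game_state():
    assert game_state([[2048, 2], [4, 8]]) == 'win'
    assert game_state([[2, 0], [4, 8]]) == 'not over'   # an empty cell remains
    assert game_state([[2, 2], [4, 8]]) == 'not over'   # a horizontal merge is possible
    assert game_state([[2, 4], [8, 16]]) == 'lose'      # no empties, no merges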
\"\"\"", "Points to note: # 0 marks for completely incorrect solutions # 1 mark", "if they can solve one side, and use transpose and reverse correctly they", "import random ####### #Task 1a# ####### # [Marking Scheme] # Points to note:", "for j in range(len(mat[0])): if mat[i][j]==0: return 'not over' for k in range(len(mat)-1):", "left(game) elif direction==\"right\": return right(game) def up(game): # print(\"up\") # return matrix after", "['up', 'down', 'left', 'right'] for direction in d: newBoard = deepcopy(mat) newBoard, done,", "for y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells", "float('inf'), startDepth) def minimax(grid, max, startDepth): if max: return maximize(grid, startDepth) else: return", "you wrong result. def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for", "in range(len(mat[0])): if mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally reduced to", "def monotonicity(grid): grid_mask = [[2048, 1024, 256, 64], [1024, 256, 64, 16], [256,", "* max_tile) else: dis = -((abs(x - 0) + abs(y - 0)) *", "depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility = -float('inf') d", "alpha >= beta: break return maxUtility def alphaBeta(grid, max, startDepth): if max: return", "transpose and reverse correctly they should # be able to solve the entire", "range(len(mat[0])): new[i].append(mat[i][len(mat[0])-j-1]) return new ########### # Task 2b # ########### # [Marking Scheme]", "range(4): count=0 for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True count+=1", "for getting two of the three conditions # 3 marks for correct checking", "= move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores)", "alphaBeta(grid, max, startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return a_minimize(grid,", "if max_tile < 1024: dis = -((abs(x - 0) + abs(y - 0))", "d: newBoard = deepcopy(mat) newBoard, done, score = move(newBoard, direction) calculatedValue = expectimax(newBoard,", "2 per up/down/left/right?) But if you get one correct likely to get all", "math import random ####### #Task 1a# ####### # [Marking Scheme] # Points to", "def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat)", "except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return", "that it is created on a zero entry # 1 mark for creating", "deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: sum_value+=", "max(maxUtility, alpha) if alpha >= beta: break return maxUtility def alphaBeta(grid, max, startDepth):", "in children: sum_value+= expectimax(child, depth-1, True) number+=1 if number == 0: return expectimax(mat,", "c[1], 4) children.append(gridCopy) for child in children: minUtility = min(minUtility, a_maximize(child, alpha, beta,", "around # No idea how to grade this one at the moment. 
I", "# [Marking Scheme] # Points to note: # 0 marks for completely incorrect", "########### # Task 1b # ########### # [Marking Scheme] # Points to note:", "#more elegant to use exceptions but most likely this will be their solution", "# Basically if they can solve one side, and use transpose and reverse", "get all correct so... # Check the down one. Reverse/transpose if ordered wrongly", "if mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally reduced to check the", "def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for j in range(4):", "creating the correct matrix def new_game(n): matrix = [] for i in range(n):", "IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha = max(maxUtility, alpha) if alpha >= beta: break return", "in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 2) children.append(gridCopy) gridCopy", "right(game) def up(game): # print(\"up\") # return matrix after shifting up game=transpose(game) game,done=cover_up(game)", "beta, depth - 1)) if minUtility <= alpha: break beta = min(minUtility, beta)", "new_tile(mat): seq = [2] * 90 + [4] newTile = choice(seq) emptySquareList =", "return 'win' for i in range(len(mat)-1): #intentionally reduced to check the row on", "for column in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score def distance(mat,", "maximize(grid, startDepth) else: return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth", "the down one. Reverse/transpose if ordered wrongly will give you wrong result. def", "Reverse/transpose if ordered wrongly will give you wrong result. def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False", "['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat)", "- 1, False) if calculatedValue > currentValue: currentValue = calculatedValue return currentValue else:", "empty cells. \"\"\" emptySquareList = [] for row in range(len(mat)): for col in", "for creating the correct loop def new_tile(mat): seq = [2] * 90 +", "equal but not identical # 1 mark for creating the correct matrix def", "'not over' for j in range(len(mat)-1): #check up/down entries on last column if", "return matrix after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or", "# # CS1010FC --- Programming Methodology # # Mission N Solutions # #", "Points to note: # Must ensure that it is created on a zero", "game=cover_up(game)[0] return (game,done, score) def right(game): # print(\"right\") # return matrix after shifting", "range(len(mat)-1): #check up/down entries on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return", "row, col to have the given value. \"\"\" mat[row][col] = value return mat", "score) def right(game): # print(\"right\") # return matrix after shifting right game=reverse(game) game,done=cover_up(game)", "marks for completely wrong solutions # 1 mark for getting only one condition", "= min(minUtility, beta) # print minUtility return minUtility def montecarlo(mat, initialScore): scores =", "score) def empty_cells(mat): \"\"\" Return a list of empty cells. 
\"\"\" emptySquareList =", "entries on the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not over' for j in", "= ['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat =", "= None for x in range(len(mat)): if dis: break for y in range(len(mat)):", "beta = min(minUtility, beta) # print minUtility return minUtility def montecarlo(mat, initialScore): scores", "and below for j in range(len(mat[0])-1): #more elegant to use exceptions but most", "# ########### # [Marking Scheme] # Points to note: # Matrix elements must", "should # be able to solve the entire thing just by flipping the", "of empty cells. \"\"\" emptySquareList = [] for row in range(len(mat)): for col", "note: # The way to do movement is compress -> merge -> compress", "for solutions that show general understanding # 2 marks for correct solutions that", "most likely this will be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not", "alpha = max(maxUtility, alpha) if alpha >= beta: break return maxUtility def alphaBeta(grid,", "# code easily while grading your problem set. from random import * from", "# [Marking Scheme] # Points to note: # Matrix elements must be equal", "0 done=False for i in range(4): for j in range(3): if mat[i][j]==mat[i][j+1] and", "but most likely this will be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return", "# Points to note: # 0 marks for completely incorrect solutions # 1", "new ########## # Task 3 # ########## # [Marking Scheme] # Points to", "game_state(mat): for i in range(len(mat)): for j in range(len(mat[0])): if mat[i][j]==2048: return 'win'", "new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if", "= 0 sum_value = 0 emptyCells = empty_cells(mat) children = [] for c", "print(\"up\") # return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done or", "one condition correct # 2 marks for getting two of the three conditions", "0 marks for completely incorrect solutions # 1 mark for solutions that show", "that show general understanding # 2 marks for correct solutions that work for", "cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for j in range(4): if", "expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue = -1 d", "at the moment. 
##########
# Task 3 #
##########

# [Marking Scheme]
# Points to note:
# The way to do movement is compress -> merge -> compress again.
# Basically, if they can solve one side and use transpose and reverse correctly,
# they should be able to solve the entire thing just by flipping the matrix around.
# No idea how to grade this one at the moment. I have it pegged to 8 (which gives
# you, say, 2 per up/down/left/right?), but if you get one direction correct you are
# likely to get them all correct, so check the down one: reverse/transpose applied in
# the wrong order will give a wrong result.

def cover_up(mat):
    new = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    done = False
    for i in range(4):
        count = 0
        for j in range(4):
            if mat[i][j] != 0:
                new[i][count] = mat[i][j]
                if j != count:
                    done = True
                count += 1
    return (new, done)

def merge(mat):
    score = 0
    done = False
    for i in range(4):
        for j in range(3):
            if mat[i][j] == mat[i][j+1] and mat[i][j] != 0:
                score += mat[i][j] * 2
                mat[i][j] *= 2
                mat[i][j+1] = 0
                done = True
    return (mat, done, score)
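# --- Illustrative sketch (assumption): a left move traced by hand as
# compress -> merge -> compress, the pattern reused by up/down/left/right below.
def _demo_left_by_hand():
    board = [[2, 0, 2, 4],
             [0, 0, 0, 0],
             [0, 0, 0, 0],
             [0, 0, 0, 0]]
    board, moved = cover_up(board)        # top row slides to [2, 2, 4, 0]
    board, merged, gained = merge(board)  # equal neighbours combine: [4, 0, 4, 0]
    board = cover_up(board)[0]            # close the gap: [4, 4, 0, 0]
    assert board[0] == [4, 4, 0, 0] and gained == 4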
"in range(3): for column in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score", "startDepth): if max: return maximize(grid, startDepth) else: return minimize(grid, startDepth) def maximize(mat, depth):", "[256, 64, 16, 4], [64, 16, 4, 1]] monotonicity_score = 0 for row", "matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done or temp[1] score =", "try: newMat, done, score = move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError:", "if alpha >= beta: break return maxUtility def alphaBeta(grid, max, startDepth): if max:", "return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done or temp[1] score", "all sizes of matrices def reverse(mat): new=[] for i in range(len(mat)): new.append([]) for", "max: return maximize(grid, startDepth) else: return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose'", "give you wrong result. def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0", "return minUtility def montecarlo(mat, initialScore): scores = [] for i in range(0, 100):", "depth-1, True) number+=1 if number == 0: return expectimax(mat, depth-1, True) return (sum_value/number)", "minimize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility = float('inf')", "if mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat): maxTile = 0", "the row on the right and below for j in range(len(mat[0])-1): #more elegant", "for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True count+=1 return (new,done)", "c[1], 2) children.append(gridCopy) gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy)", "transpose(mat): new=[] for i in range(len(mat[0])): new.append([]) for j in range(len(mat)): new[i].append(mat[j][i]) return", "for completely incorrect solutions # 1 mark for solutions that show general understanding", "to 8 (which gives you like, # 2 per up/down/left/right?) But if you", "# # Mission N Solutions # # Note that written answers are commented", "def new_tile(mat): seq = [2] * 90 + [4] newTile = choice(seq) emptySquareList", "256, 64, 16], [256, 64, 16, 4], [64, 16, 4, 1]] monotonicity_score =", "over' for i in range(len(mat)): #check for any zero entries for j in", "in range(len(mat[0])-1): #more elegant to use exceptions but most likely this will be", "is compress -> merge -> compress again # Basically if they can solve", "result. 
def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for j in", "gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in", "game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] return (game,done, score)", "return (game,done, score) def right(game): # print(\"right\") # return matrix after shifting right", "solve the entire thing just by flipping the matrix around # No idea", "mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done,", "mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return 'lose' ########### # Task 2a # ########### #", "for col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList def", "for j in range(len(mat[0])-1): #more elegant to use exceptions but most likely this", "-((abs(x - 0) + abs(y - 0)) * (max_tile / 2)) break return", "abs(y - 0)) * (max_tile / 2)) break return dis def a_maximize(mat, alpha,", "maximizer: currentValue = -1 d = ['up', 'down', 'left', 'right'] for direction in", "= value return mat def move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return", "# print(\"up\") # return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done", "float('inf') emptyCells = empty_cells(mat) children = [] for c in emptyCells: gridCopy =", "down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1]", "right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=reverse(game)", "Solutions # # Note that written answers are commented out to allow us", "Task 3 # ########## # [Marking Scheme] # Points to note: # The", "set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: sum_value+= expectimax(child, depth-1, True)", "emptySquareList def getMaxTile(mat): maxTile = 0 for x in range(len(mat)): for y in", "score) def left(game): # print(\"left\") # return matrix after shifting left game,done=cover_up(game) temp=merge(game)", "= len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat) return score def", "up/down/left/right?) But if you get one correct likely to get all correct so...", "child in children: sum_value+= expectimax(child, depth-1, True) number+=1 if number == 0: return", "1)) # print minUtility return minUtility def a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose'", "on the right and below for j in range(len(mat[0])-1): #more elegant to use", "max_tile): dis = None for x in range(len(mat)): if dis: break for y", "montecarlo(mat, initialScore): scores = [] for i in range(0, 100): directions = ['up',", "correct # 2 marks for getting two of the three conditions # 3", "incorrect solutions # 1 mark for solutions that show general understanding # 2", "# Matrix elements must be equal but not identical # 1 mark for", "# Task 1c # ########### # [Marking Scheme] # Points to note: #", "wrongly will give you wrong result. 
def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in", "# Must ensure that it is created on a zero entry # 1", ">= beta: break return maxUtility def alphaBeta(grid, max, startDepth): if max: return a_maximize(grid,", "if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility = float('inf') emptyCells =", "j in range(len(mat[0])): if mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally reduced", "maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha", "c[1], 4) children.append(gridCopy) for child in children: sum_value+= expectimax(child, depth-1, True) number+=1 if", "likely this will be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over'", "for x in range(len(mat)): if dis: break for y in range(len(mat)): if max_tile", "getting only one condition correct # 2 marks for getting two of the", "1024: dis = -((abs(x - 0) + abs(y - 0)) * max_tile) else:", "game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return", "note: # Must ensure that it is created on a zero entry #", "# CS1010FC --- Programming Methodology # # Mission N Solutions # # Note", "move(c, direction) if done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except", "in children: minUtility = min(minUtility, maximize(child, depth - 1)) # print minUtility return", "# 1 mark for creating the correct matrix def new_game(n): matrix = []", "return mat ########### # Task 1c # ########### # [Marking Scheme] # Points", "heuristic_score(mat) maxUtility = -float('inf') d = ['up', 'down', 'left', 'right'] for direction in", "break for y in range(len(mat)): if max_tile == mat[x][y]: if max_tile < 1024:", "# Points to note: # The way to do movement is compress ->", "(which gives you like, # 2 per up/down/left/right?) 
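# --- Illustrative sketch (assumption): the evaluation used by the search code
# below rewards a monotone corner arrangement, free space and the largest tile;
# the 1.5 and 2 factors come straight from heuristic_score above.
def _demo_heuristic():
    board = new_game(4)
    board[0][0], board[0][1] = 64, 32     # big tiles in the favoured corner
    return heuristic_score(board)         # monotonicity*1.5 + empties*2 + max tile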
def a_maximize(mat, alpha, beta, depth):
    if game_state(mat) == 'lose' or depth == 0:
        return heuristic_score(mat)
    maxUtility = -float('inf')
    d = ['up', 'down', 'left', 'right']
    for direction in d:
        c = deepcopy(mat)
        try:
            c, done, score = move(c, direction)  # move returns (matrix, moved, score)
            if done:
                maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1))
        except IndexError:
            print("error-----------------------------------------------------------------------------")
            continue
        alpha = max(maxUtility, alpha)
        if alpha >= beta:
            break
    return maxUtility

def alphaBeta(grid, max, startDepth):
    if max:
        return a_maximize(grid, -float('inf'), float('inf'), startDepth)
    else:
        return a_minimize(grid, -float('inf'), float('inf'), startDepth)

def minimax(grid, max, startDepth):
    if max:
        return maximize(grid, startDepth)
    else:
        return minimize(grid, startDepth)

def maximize(mat, depth):
    if game_state(mat) == 'lose' or depth == 0:
        return heuristic_score(mat)
    maxUtility = -float('inf')
    d = ['up', 'down', 'left', 'right']
    for direction in d:
        c = deepcopy(mat)
        try:
            c, done, score = move(c, direction)  # move returns (matrix, moved, score)
            if done:
                maxUtility = max(maxUtility, minimize(c, depth - 1))
        except IndexError:
            continue
    return maxUtility

def minimize(mat, depth):
    if game_state(mat) == 'lose' or depth == 0:
        return heuristic_score(mat)
    minUtility = float('inf')
    emptyCells = empty_cells(mat)
    children = []
    for c in emptyCells:
        gridCopy = deepcopy(mat)
        gridCopy = set_tile(gridCopy, c[0], c[1], 2)
        children.append(gridCopy)
        gridCopy = deepcopy(mat)
        gridCopy = set_tile(gridCopy, c[0], c[1], 4)
        children.append(gridCopy)
    for child in children:
        minUtility = min(minUtility, maximize(child, depth - 1))
    # print minUtility
    return minUtility

def a_minimize(mat, alpha, beta, depth):
    if game_state(mat) == 'lose' or depth == 0:
        return heuristic_score(mat)
    minUtility = float('inf')
    emptyCells = empty_cells(mat)
    children = []
    for c in emptyCells:
        gridCopy = deepcopy(mat)
        gridCopy = set_tile(gridCopy, c[0], c[1], 2)
        children.append(gridCopy)
        gridCopy = deepcopy(mat)
        gridCopy = set_tile(gridCopy, c[0], c[1], 4)
        children.append(gridCopy)
    for child in children:
        minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1))
        if minUtility <= alpha:
            break
        beta = min(minUtility, beta)
    # print minUtility
    return minUtility
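# --- Illustrative sketch (assumption): minimax()/alphaBeta() return a value,
# not a move, so a hypothetical driver tries each direction and keeps the one
# whose resulting position survives the minimising reply best.
def _demo_best_move_minimax(mat, depth=2):
    best_direction, best_value = None, -float('inf')
    for direction in ['up', 'down', 'left', 'right']:
        child, moved, _ = move(deepcopy(mat), direction)
        if not moved:
            continue
        value = minimize(child, depth - 1)
        if value > best_value:
            best_direction, best_value = direction, value
    return best_direction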
\"\"\" emptySquareList = [] for", "j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True count+=1 return (new,done) def", "work for all sizes of matrices def reverse(mat): new=[] for i in range(len(mat)):", "maximize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility = -float('inf')", "= [] for c in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0],", "########### # Task 2b # ########### # [Marking Scheme] # Points to note:", "done, score = move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore)", "calculatedValue return currentValue else: number = 0 sum_value = 0 emptyCells = empty_cells(mat)", "elif direction==\"right\": return right(game) def up(game): # print(\"up\") # return matrix after shifting", "temp[2] game=cover_up(game)[0] game=transpose(game) return (game,done, score) def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game)", "Task 2a # ########### # [Marking Scheme] # Points to note: # 0", "16], [256, 64, 16, 4], [64, 16, 4, 1]] monotonicity_score = 0 for", "move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def", "'right'] for direction in d: c = deepcopy(mat) try: c, done = move(c,", "the tile at position row, col to have the given value. \"\"\" mat[row][col]", "# No idea how to grade this one at the moment. I have", "if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility = -float('inf') d =", "merge -> compress again # Basically if they can solve one side, and", "in range(4): count=0 for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True", "= 0 done=False for i in range(4): for j in range(3): if mat[i][j]==mat[i][j+1]", "mat[i][j+1]=0 done=True return (mat,done, score) def empty_cells(mat): \"\"\" Return a list of empty", "startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) maxUtility", "100): directions = ['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)]", "range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3 # ########## # [Marking Scheme]", "currentValue = -1 d = ['up', 'down', 'left', 'right'] for direction in d:", "* 90 + [4] newTile = choice(seq) emptySquareList = empty_cells(mat) emptySquare = choice(emptySquareList)", "position row, col to have the given value. 
\"\"\" mat[row][col] = value return", "########## # [Marking Scheme] # Points to note: # The way to do", "score = move(newBoard, direction) calculatedValue = expectimax(newBoard, depth - 1, False) if calculatedValue", "return (game,done, score) def left(game): # print(\"left\") # return matrix after shifting left", "empty_cells(mat) emptySquare = choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] = newTile return mat ########### # Task 1c", "return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 +", "of matrices def reverse(mat): new=[] for i in range(len(mat)): new.append([]) for j in", "range(len(mat[0])): new.append([]) for j in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3", "on a zero entry # 1 mark for creating the correct loop def", "shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] return", "it pegged to 8 (which gives you like, # 2 per up/down/left/right?) But", "-float('inf'), float('inf'), startDepth) else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth):", "emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat): maxTile = 0 for x in range(len(mat)):", "IndexError: continue return maxUtility def minimize(mat, depth): if game_state(mat)=='lose' or depth == 0:", "correct solutions that work for all sizes of matrices def reverse(mat): new=[] for", "/ 2)) break return dis def a_maximize(mat, alpha, beta, depth): if game_state(mat)=='lose' or", "if done: maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\")", "# ########## # [Marking Scheme] # Points to note: # The way to", "in range(len(mat)-1): #check up/down entries on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over'", "identical # 1 mark for creating the correct matrix def new_game(n): matrix =", "will be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over' for i", "col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat):", "= -((abs(x - 0) + abs(y - 0)) * max_tile) else: dis =", "idea how to grade this one at the moment. 
I have it pegged", "return emptySquareList def getMaxTile(mat): maxTile = 0 for x in range(len(mat)): for y", "depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility = float('inf') emptyCells", "= max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5", "= 0 emptyCells = empty_cells(mat) children = [] for c in emptyCells: gridCopy", "c[0], c[1], 4) children.append(gridCopy) for child in children: sum_value+= expectimax(child, depth-1, True) number+=1", "max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 +", "return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth == 0: return", "########### # [Marking Scheme] # Points to note: # 0 marks for completely", "<= alpha: break beta = min(minUtility, beta) # print minUtility return minUtility def", "down(game) # down(game) elif direction == \"left\": return left(game) elif direction==\"right\": return right(game)", "sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue =", "2 marks for correct solutions that work for all sizes of matrices def", "correct so... # Check the down one. Reverse/transpose if ordered wrongly will give", "= empty_cells(mat) children = [] for c in emptyCells: gridCopy = deepcopy(mat) gridCopy", "alpha) if alpha >= beta: break return maxUtility def alphaBeta(grid, max, startDepth): if", "startDepth) else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth): if max:", "matrix def new_game(n): matrix = [] for i in range(n): matrix.append([0] * n)", "def merge(mat): score = 0 done=False for i in range(4): for j in", "dis = -((abs(x - 0) + abs(y - 0)) * (max_tile / 2))", "= min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if minUtility <= alpha: break", "for child in children: minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1))", "Basically if they can solve one side, and use transpose and reverse correctly", "directions = ['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat", "depth - 1)) except IndexError: continue return maxUtility def minimize(mat, depth): if game_state(mat)=='lose'", "= deepcopy(mat) gameScore = initialScore while game_state(newMat)!='lose': try: newMat, done, score = move(newMat,", "in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True count+=1 return (new,done) def merge(mat):", "# Task 2b # ########### # [Marking Scheme] # Points to note: #", "return heuristic_score(mat) maxUtility = -float('inf') d = ['up', 'down', 'left', 'right'] for direction", "* (max_tile / 2)) break return dis def a_maximize(mat, alpha, beta, depth): if", "0) + abs(y - 0)) * (max_tile / 2)) break return dis def", "new[i][count]=mat[i][j] if j!=count: done=True count+=1 return (new,done) def merge(mat): score = 0 done=False", "return maxUtility def minimize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat)", "two of the three conditions # 3 marks for correct checking def game_state(mat):", "Points to note: # Matrix elements must be equal but not identical #", "exceptions but most likely this will be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]:", "The way to do movement is compress -> merge -> compress again 
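# --- Illustrative sketch (assumption): the chance nodes in minimize(),
# a_minimize() and expectimax() expand every empty cell twice, once with a 2
# and once with a 4, exactly the way set_tile() is used above.
def _demo_chance_children(mat):
    children = []
    for row, col in empty_cells(mat):
        for value in (2, 4):
            children.append(set_tile(deepcopy(mat), row, col, value))
    return children   # 2 * len(empty_cells(mat)) candidate boards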
#", "not identical # 0 marks for completely wrong solutions # 1 mark for", "= max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha =", "direction in d: c = deepcopy(mat) try: c, done = move(c, direction) if", "1)) if minUtility <= alpha: break beta = min(minUtility, beta) # print minUtility", "gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0:", "def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue = -1", "wrong solutions # 1 mark for getting only one condition correct # 2", "return maximize(grid, startDepth) else: return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or", "to note: # Matrix elements must be equal but not identical # 0", "# [Marking Scheme] # Points to note: # Must ensure that it is", "startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return a_minimize(grid, -float('inf'), float('inf'),", "direction == \"left\": return left(game) elif direction==\"right\": return right(game) def up(game): # print(\"up\")", "= deepcopy(mat) try: c, done = move(c, direction) if done: maxUtility = max(maxUtility,", "matrix after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1]", "########### # [Marking Scheme] # Points to note: # Must ensure that it", "set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: minUtility = min(minUtility, maximize(child,", "< 1024: dis = -((abs(x - 0) + abs(y - 0)) * max_tile)", "range(len(mat)): for j in range(len(mat[0])): if mat[i][j]==2048: return 'win' for i in range(len(mat)-1):", "check the row on the right and below for j in range(len(mat[0])-1): #more", "children.append(gridCopy) for child in children: sum_value+= expectimax(child, depth-1, True) number+=1 if number ==", "range(len(mat)): for y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat):", "# Task 1b # ########### # [Marking Scheme] # Points to note: #", "over' return 'lose' ########### # Task 2a # ########### # [Marking Scheme] #", "correct matrix def new_game(n): matrix = [] for i in range(n): matrix.append([0] *", "return (new,done) def merge(mat): score = 0 done=False for i in range(4): for", "in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat))", "movement is compress -> merge -> compress again # Basically if they can", "'left', 'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat) gameScore =", "expectimax(newBoard, depth - 1, False) if calculatedValue > currentValue: currentValue = calculatedValue return", "return 'lose' ########### # Task 2a # ########### # [Marking Scheme] # Points", "in range(len(mat[0])): new[i].append(mat[i][len(mat[0])-j-1]) return new ########### # Task 2b # ########### # [Marking", "(sum_value/number) def set_tile(mat, row, col, value): \"\"\" Set the tile at position row,", "are commented out to allow us to run your # code easily while", "done, score = move(newBoard, direction) calculatedValue = expectimax(newBoard, depth - 1, False) if", "# Note that written answers are commented out to allow us to run", "mark for creating the correct 
loop def new_tile(mat): seq = [2] * 90", "= deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children:", "False) if calculatedValue > currentValue: currentValue = calculatedValue return currentValue else: number =", "mark for solutions that show general understanding # 2 marks for correct solutions", "sizes of matrices def transpose(mat): new=[] for i in range(len(mat[0])): new.append([]) for j", "break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if", "three conditions # 3 marks for correct checking def game_state(mat): for i in", "cells. \"\"\" emptySquareList = [] for row in range(len(mat)): for col in range(len(mat[0])):", "again # Basically if they can solve one side, and use transpose and", "def a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat)", "8 (which gives you like, # 2 per up/down/left/right?) But if you get", "matrix.append([0] * n) return matrix ########### # Task 1b # ########### # [Marking", "j!=count: done=True count+=1 return (new,done) def merge(mat): score = 0 done=False for i", "gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: sum_value+= expectimax(child,", "# print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0]", "done=False for i in range(4): count=0 for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j]", "Check the down one. Reverse/transpose if ordered wrongly will give you wrong result.", "depth - 1, False) if calculatedValue > currentValue: currentValue = calculatedValue return currentValue", "2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score) def empty_cells(mat): \"\"\" Return a list", "row in range(len(mat)): for col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col])", "in children: minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if minUtility", "1)) except IndexError: continue return maxUtility def minimize(mat, depth): if game_state(mat)=='lose' or depth", "deepcopy(mat) gameScore = initialScore while game_state(newMat)!='lose': try: newMat, done, score = move(newMat, direction)", "they should # be able to solve the entire thing just by flipping", "grading your problem set. from random import * from copy import deepcopy import", "maxUtility def alphaBeta(grid, max, startDepth): if max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else:", "monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score def distance(mat, max_tile): dis = None", "in range(3): if mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0", "while grading your problem set. from random import * from copy import deepcopy", "one. Reverse/transpose if ordered wrongly will give you wrong result. 
def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]", "if max_tile == mat[x][y]: if max_tile < 1024: dis = -((abs(x - 0)", "done: maxUtility = max(maxUtility, minimize(c, depth - 1)) except IndexError: continue return maxUtility", "they can solve one side, and use transpose and reverse correctly they should", "depth - 1)) if minUtility <= alpha: break beta = min(minUtility, beta) #", "(game,done, score) def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2]", "\"\"\" mat[row][col] = value return mat def move(game, direction): if(direction==\"up\"): return up(game) elif", "1]] monotonicity_score = 0 for row in range(3): for column in range(3): monotonicity_score", "dis def a_maximize(mat, alpha, beta, depth): if game_state(mat)=='lose' or depth == 0: return", "True) return (sum_value/number) def set_tile(mat, row, col, value): \"\"\" Set the tile at", "#to check the left/right entries on the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not", "# 1 mark for solutions that show general understanding # 2 marks for", "in d: c = deepcopy(mat) try: c, done = move(c, direction) if done:", "-float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth): if max: return maximize(grid, startDepth) else:", "minUtility return minUtility def montecarlo(mat, initialScore): scores = [] for i in range(0,", "# ########### # [Marking Scheme] # Points to note: # Must ensure that", "solutions that work for all sizes of matrices def transpose(mat): new=[] for i", "# return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game) game=temp[0] done=done or temp[1]", "left/right entries on the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not over' for j", "done=done or temp[1] game=cover_up(game)[0] return (game,done, score) def right(game): # print(\"right\") # return", "minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if minUtility <= alpha:", "to check the row on the right and below for j in range(len(mat[0])-1):", "min(minUtility, maximize(child, depth - 1)) # print minUtility return minUtility def a_minimize(mat, alpha,", "[] for i in range(0, 100): directions = ['up', 'down', 'left', 'right'] direction", "if number == 0: return expectimax(mat, depth-1, True) return (sum_value/number) def set_tile(mat, row,", "understanding # 2 marks for correct solutions that work for all sizes of", "sum_value = 0 emptyCells = empty_cells(mat) children = [] for c in emptyCells:", "import math import random ####### #Task 1a# ####### # [Marking Scheme] # Points", "* grid_mask[row][column] return monotonicity_score def distance(mat, max_tile): dis = None for x in", "j in range(len(mat)-1): #check up/down entries on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not", "return heuristic_score(mat) minUtility = float('inf') emptyCells = empty_cells(mat) children = [] for c", "solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over' for i in range(len(mat)): #check", "max(maxUtility, a_minimize(c, alpha, beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha = max(maxUtility,", "mat def move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game) # down(game)", "new.append([]) for j in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3 #", "# print 
minUtility return minUtility def a_minimize(mat, alpha, beta, depth): if game_state(mat)=='lose' or", "calculatedValue > currentValue: currentValue = calculatedValue return currentValue else: number = 0 sum_value", "= temp[2] done=done or temp[1] game=cover_up(game)[0] return (game,done, score) def right(game): # print(\"right\")", "'not over' for k in range(len(mat)-1): #to check the left/right entries on the", "the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not over' for j in range(len(mat)-1): #check", "move(newBoard, direction) calculatedValue = expectimax(newBoard, depth - 1, False) if calculatedValue > currentValue:", "0 marks for completely wrong solutions # 1 mark for getting only one", "--- Programming Methodology # # Mission N Solutions # # Note that written", "grid_mask[row][column] return monotonicity_score def distance(mat, max_tile): dis = None for x in range(len(mat)):", "startDepth) else: return minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth ==", "over' for j in range(len(mat)-1): #check up/down entries on last column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]:", "direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game) # down(game) elif direction ==", "allow us to run your # code easily while grading your problem set.", "marks for getting two of the three conditions # 3 marks for correct", "directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat) gameScore = initialScore while game_state(newMat)!='lose': try:", "children.append(gridCopy) for child in children: minUtility = min(minUtility, maximize(child, depth - 1)) #", "= set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: sum_value+= expectimax(child, depth-1,", "emptyCells = empty_cells(mat) children = [] for c in emptyCells: gridCopy = deepcopy(mat)", "matrix after shifting right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or", "def transpose(mat): new=[] for i in range(len(mat[0])): new.append([]) for j in range(len(mat)): new[i].append(mat[j][i])", "Note that written answers are commented out to allow us to run your", "64, 16], [256, 64, 16, 4], [64, 16, 4, 1]] monotonicity_score = 0", "j in range(len(mat[0])-1): #more elegant to use exceptions but most likely this will", "to do movement is compress -> merge -> compress again # Basically if", "CS1010FC --- Programming Methodology # # Mission N Solutions # # Note that", "# 2 per up/down/left/right?) But if you get one correct likely to get", "from copy import deepcopy import math import random ####### #Task 1a# ####### #", "scores = [] for i in range(0, 100): directions = ['up', 'down', 'left',", "deepcopy import math import random ####### #Task 1a# ####### # [Marking Scheme] #", "(max_tile / 2)) break return dis def a_maximize(mat, alpha, beta, depth): if game_state(mat)=='lose'", "tile at position row, col to have the given value. \"\"\" mat[row][col] =", "ordered wrongly will give you wrong result. 
def cover_up(mat): new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i", "mat[emptySquare[0]][emptySquare[1]] = newTile return mat ########### # Task 1c # ########### # [Marking", "n) return matrix ########### # Task 1b # ########### # [Marking Scheme] #", "3 marks for correct checking def game_state(mat): for i in range(len(mat)): for j", "= float('inf') emptyCells = empty_cells(mat) children = [] for c in emptyCells: gridCopy", "- 0) + abs(y - 0)) * max_tile) else: dis = -((abs(x -", "mat[i][j]==0: return 'not over' for k in range(len(mat)-1): #to check the left/right entries", "one at the moment. I have it pegged to 8 (which gives you", "shifting right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0]", "just by flipping the matrix around # No idea how to grade this", "score) def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done", "elegant to use exceptions but most likely this will be their solution if", "for j in range(len(mat[0])): new[i].append(mat[i][len(mat[0])-j-1]) return new ########### # Task 2b # ###########", "for child in children: sum_value+= expectimax(child, depth-1, True) number+=1 if number == 0:", "[] for c in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1],", "game_state(mat)=='lose' or depth == 0: return heuristic_score(mat) minUtility = float('inf') emptyCells = empty_cells(mat)", "x in range(len(mat)): for y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile", "in range(len(mat[0])): new.append([]) for j in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task", "# Matrix elements must be equal but not identical # 0 marks for", "16, 4, 1]] monotonicity_score = 0 for row in range(3): for column in", "print minUtility return minUtility def montecarlo(mat, initialScore): scores = [] for i in", "emptySquareList = [] for row in range(len(mat)): for col in range(len(mat[0])): if mat[row][col]", "gives you like, # 2 per up/down/left/right?) But if you get one correct", "range(3): for column in range(3): monotonicity_score += grid[row][column] * grid_mask[row][column] return monotonicity_score def", "merge(mat): score = 0 done=False for i in range(4): for j in range(3):", "def down(game): # print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or", "i in range(4): count=0 for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count:", "newMat, done, score = move(newMat, direction) newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break", "= ['up', 'down', 'left', 'right'] for direction in d: newBoard = deepcopy(mat) newBoard,", "+ getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048, 1024, 256, 64], [1024,", "for getting only one condition correct # 2 marks for getting two of", "that work for all sizes of matrices def transpose(mat): new=[] for i in", "list of empty cells. \"\"\" emptySquareList = [] for row in range(len(mat)): for", "mat ########### # Task 1c # ########### # [Marking Scheme] # Points to", "for correct checking def game_state(mat): for i in range(len(mat)): for j in range(len(mat[0])):", "code easily while grading your problem set. 
from random import * from copy", "for i in range(4): count=0 for j in range(4): if mat[i][j]!=0: new[i][count]=mat[i][j] if", "and use transpose and reverse correctly they should # be able to solve", "- 0)) * max_tile) else: dis = -((abs(x - 0) + abs(y -", "else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth): if max: return", "solve one side, and use transpose and reverse correctly they should # be", "# Task 3 # ########## # [Marking Scheme] # Points to note: #", "return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth): if max: return maximize(grid,", "temp=merge(game) game=temp[0] done=done or temp[1] score = temp[2] game=cover_up(game)[0] game=transpose(game) return (game,done, score)", "done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score) def left(game): # print(\"left\") #", "flipping the matrix around # No idea how to grade this one at", "but not identical # 1 mark for creating the correct matrix def new_game(n):", "= [[2048, 1024, 256, 64], [1024, 256, 64, 16], [256, 64, 16, 4],", "[Marking Scheme] # Points to note: # Must ensure that it is created", "alpha, beta, depth - 1)) if minUtility <= alpha: break beta = min(minUtility,", "down one. Reverse/transpose if ordered wrongly will give you wrong result. def cover_up(mat):", "easily while grading your problem set. from random import * from copy import", "identical # 0 marks for completely wrong solutions # 1 mark for getting", "for i in range(len(mat)-1): #intentionally reduced to check the row on the right", "to have the given value. \"\"\" mat[row][col] = value return mat def move(game,", "-float('inf') d = ['up', 'down', 'left', 'right'] for direction in d: c =", "== 0: return heuristic_score(mat) minUtility = float('inf') emptyCells = empty_cells(mat) children = []", "for i in range(n): matrix.append([0] * n) return matrix ########### # Task 1b", "to grade this one at the moment. I have it pegged to 8", "score = temp[2] done=done or temp[1] game=cover_up(game)[0] return (game,done, score) def right(game): #", "for creating the correct matrix def new_game(n): matrix = [] for i in", "But if you get one correct likely to get all correct so... 
#", "Task 1c # ########### # [Marking Scheme] # Points to note: # Matrix", "for row in range(len(mat)): for col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row,", "number = 0 sum_value = 0 emptyCells = empty_cells(mat) children = [] for", "# 0 marks for completely incorrect solutions # 1 mark for solutions that", "choice(seq) emptySquareList = empty_cells(mat) emptySquare = choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] = newTile return mat ###########", "for j in range(len(mat[0])): if mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally", "currentValue: currentValue = calculatedValue return currentValue else: number = 0 sum_value = 0", "return expectimax(mat, depth-1, True) return (sum_value/number) def set_tile(mat, row, col, value): \"\"\" Set", "completely incorrect solutions # 1 mark for solutions that show general understanding #", "+ + getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048, 1024, 256, 64],", "- 1)) except IndexError: continue return maxUtility def minimize(mat, depth): if game_state(mat)=='lose' or", "print(\"left\") # return matrix after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2]", "show general understanding # 2 marks for correct solutions that work for all", "# Points to note: # Matrix elements must be equal but not identical", "sizes of matrices def reverse(mat): new=[] for i in range(len(mat)): new.append([]) for j", "over' for k in range(len(mat)-1): #to check the left/right entries on the last", "########## # Task 3 # ########## # [Marking Scheme] # Points to note:", "* 2 mat[i][j]*=2 mat[i][j+1]=0 done=True return (mat,done, score) def empty_cells(mat): \"\"\" Return a", "i in range(len(mat[0])): new.append([]) for j in range(len(mat)): new[i].append(mat[j][i]) return new ########## #", "maxTile = 0 for x in range(len(mat)): for y in range(len(mat[x])): maxTile =", "minimize(grid, startDepth) def maximize(mat, depth): if game_state(mat)=='lose' or depth == 0: return heuristic_score(mat)", "i in range(len(mat)): for j in range(len(mat[0])): if mat[i][j]==2048: return 'win' for i", "set_tile(mat, row, col, value): \"\"\" Set the tile at position row, col to", "def up(game): # print(\"up\") # return matrix after shifting up game=transpose(game) game,done=cover_up(game) temp=merge(game)", "c[0], c[1], 2) children.append(gridCopy) gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 4)", "mat[x][y]) return maxTile def heuristic_score(mat): number_of_empty_cells = len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2", "temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game)) return (game,done, score)", "gridCopy = set_tile(gridCopy, c[0], c[1], 2) children.append(gridCopy) gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy,", "score = temp[2] game=cover_up(game)[0] game=transpose(game) return (game,done, score) def down(game): # print(\"down\") game=reverse(transpose(game))", "new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]] done=False for i in range(4): count=0 for j in range(4): if mat[i][j]!=0:", "not identical # 1 mark for creating the correct matrix def new_game(n): matrix", "min(minUtility, beta) # print minUtility return minUtility def montecarlo(mat, initialScore): scores = []", "the moment. 
I have it pegged to 8 (which gives you like, #", "Matrix elements must be equal but not identical # 0 marks for completely", "depth - 1)) # print minUtility return minUtility def a_minimize(mat, alpha, beta, depth):", "d: c = deepcopy(mat) try: c, done = move(c, direction) if done: maxUtility", "No idea how to grade this one at the moment. I have it", "gridCopy = set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: minUtility =", "-1 d = ['up', 'down', 'left', 'right'] for direction in d: newBoard =", "Task 2b # ########### # [Marking Scheme] # Points to note: # 0", "\"left\": return left(game) elif direction==\"right\": return right(game) def up(game): # print(\"up\") # return", "newMat = new_tile(newMat) gameScore+=score+heuristic_score(mat) except IndexError: break scores.append(gameScore) return sum(scores)/len(scores) def expectimax(mat, depth,", "float('inf'), startDepth) else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def minimax(grid, max, startDepth): if", "moment. I have it pegged to 8 (which gives you like, # 2", "return up(game) elif direction==\"down\": return down(game) # down(game) elif direction == \"left\": return", "be their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over' for i in", "solutions # 1 mark for solutions that show general understanding # 2 marks", "children: minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1)) if minUtility <=", "1a# ####### # [Marking Scheme] # Points to note: # Matrix elements must", "continue alpha = max(maxUtility, alpha) if alpha >= beta: break return maxUtility def", "-> compress again # Basically if they can solve one side, and use", "elements must be equal but not identical # 0 marks for completely wrong", "column if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]: return 'not over' return 'lose' ########### # Task 2a #", "len(empty_cells(mat)) score = monotonicity(mat)*1.5 + number_of_empty_cells*2 + + getMaxTile(mat) return score def monotonicity(grid):", "= initialScore while game_state(newMat)!='lose': try: newMat, done, score = move(newMat, direction) newMat =", "Scheme] # Points to note: # Matrix elements must be equal but not", "# 1 mark for getting only one condition correct # 2 marks for", "your problem set. from random import * from copy import deepcopy import math", "# Points to note: # Must ensure that it is created on a", "# Task 2a # ########### # [Marking Scheme] # Points to note: #", "mat[i][j]!=0: new[i][count]=mat[i][j] if j!=count: done=True count+=1 return (new,done) def merge(mat): score = 0", "move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game) # down(game) elif direction", "max: return a_maximize(grid, -float('inf'), float('inf'), startDepth) else: return a_minimize(grid, -float('inf'), float('inf'), startDepth) def", "-((abs(x - 0) + abs(y - 0)) * max_tile) else: dis = -((abs(x", "in range(0, 100): directions = ['up', 'down', 'left', 'right'] direction = directions[random.randint(0, len(directions)", "0: emptySquareList.append([row, col]) return emptySquareList def getMaxTile(mat): maxTile = 0 for x in", "Task 1b # ########### # [Marking Scheme] # Points to note: # Must", "= [2] * 90 + [4] newTile = choice(seq) emptySquareList = empty_cells(mat) emptySquare", "newTile = choice(seq) emptySquareList = empty_cells(mat) emptySquare = choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] = newTile return", "your # code easily while grading your problem set. 
from random import *", "have it pegged to 8 (which gives you like, # 2 per up/down/left/right?)", "'down', 'left', 'right'] for direction in d: newBoard = deepcopy(mat) newBoard, done, score", "return matrix after shifting right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done", "for j in range(len(mat)): new[i].append(mat[j][i]) return new ########## # Task 3 # ##########", "####### #Task 1a# ####### # [Marking Scheme] # Points to note: # Matrix", "able to solve the entire thing just by flipping the matrix around #", "'right'] direction = directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat) gameScore = initialScore", "minUtility = min(minUtility, maximize(child, depth - 1)) # print minUtility return minUtility def", "get one correct likely to get all correct so... # Check the down", "solutions that work for all sizes of matrices def reverse(mat): new=[] for i", "[4] newTile = choice(seq) emptySquareList = empty_cells(mat) emptySquare = choice(emptySquareList) mat[emptySquare[0]][emptySquare[1]] = newTile", "range(3): if mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score += mat[i][j] * 2 mat[i][j]*=2 mat[i][j+1]=0 done=True", "range(len(mat[0])): if mat[i][j]==2048: return 'win' for i in range(len(mat)-1): #intentionally reduced to check", "== 0: return heuristic_score(mat) maxUtility = -float('inf') d = ['up', 'down', 'left', 'right']", "game=temp[0] done=done or temp[1] score = temp[2] game=cover_up(game)[0] game=transpose(game) return (game,done, score) def", "run your # code easily while grading your problem set. from random import", "set_tile(gridCopy, c[0], c[1], 4) children.append(gridCopy) for child in children: minUtility = min(minUtility, a_maximize(child,", "matrices def transpose(mat): new=[] for i in range(len(mat[0])): new.append([]) for j in range(len(mat)):", "right(game): # print(\"right\") # return matrix after shifting right game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0]", "game=reverse(game) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=reverse(game) return", "Scheme] # Points to note: # Must ensure that it is created on", "in range(len(mat)): for y in range(len(mat[x])): maxTile = max(maxTile, mat[x][y]) return maxTile def", "use transpose and reverse correctly they should # be able to solve the", "children = [] for c in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy,", "direction = directions[random.randint(0, len(directions) - 1)] newMat = deepcopy(mat) gameScore = initialScore while", "# Mission N Solutions # # Note that written answers are commented out", "the left/right entries on the last row if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]: return 'not over' for", "must be equal but not identical # 1 mark for creating the correct", "emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 2) children.append(gridCopy) gridCopy =", "direction==\"right\": return right(game) def up(game): # print(\"up\") # return matrix after shifting up", "direction==\"down\": return down(game) # down(game) elif direction == \"left\": return left(game) elif direction==\"right\":", "(game,done, score) def right(game): # print(\"right\") # return matrix after shifting right game=reverse(game)", "0 sum_value = 0 emptyCells = empty_cells(mat) children = [] for c in", "one correct likely to get all correct so... 
# Check the down one.", "for c in emptyCells: gridCopy = deepcopy(mat) gridCopy = set_tile(gridCopy, c[0], c[1], 2)", "= [] for i in range(n): matrix.append([0] * n) return matrix ########### #", "'left', 'right'] for direction in d: c = deepcopy(mat) try: c, done =", "newTile return mat ########### # Task 1c # ########### # [Marking Scheme] #", "####### # [Marking Scheme] # Points to note: # Matrix elements must be", "for k in range(len(mat)-1): #to check the left/right entries on the last row", "gameScore = initialScore while game_state(newMat)!='lose': try: newMat, done, score = move(newMat, direction) newMat", "#intentionally reduced to check the row on the right and below for j", "col to have the given value. \"\"\" mat[row][col] = value return mat def", "written answers are commented out to allow us to run your # code", "their solution if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]: return 'not over' for i in range(len(mat)):", "thing just by flipping the matrix around # No idea how to grade", "return monotonicity_score def distance(mat, max_tile): dis = None for x in range(len(mat)): if", "# Check the down one. Reverse/transpose if ordered wrongly will give you wrong", "for direction in d: newBoard = deepcopy(mat) newBoard, done, score = move(newBoard, direction)", "value return mat def move(game, direction): if(direction==\"up\"): return up(game) elif direction==\"down\": return down(game)", "score = 0 done=False for i in range(4): for j in range(3): if", "to solve the entire thing just by flipping the matrix around # No", "max_tile == mat[x][y]: if max_tile < 1024: dis = -((abs(x - 0) +", "print(\"down\") game=reverse(transpose(game)) game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done or temp[1] game=cover_up(game)[0] game=transpose(reverse(game))", "'lose' ########### # Task 2a # ########### # [Marking Scheme] # Points to", "# return matrix after shifting left game,done=cover_up(game) temp=merge(game) game=temp[0] score = temp[2] done=done", "in range(len(mat)): for col in range(len(mat[0])): if mat[row][col] == 0: emptySquareList.append([row, col]) return", "return sum(scores)/len(scores) def expectimax(mat, depth, maximizer): if depth==0: return heuristic_score(mat) if maximizer: currentValue", "2a # ########### # [Marking Scheme] # Points to note: # 0 marks", "beta, depth-1 )) except IndexError: print(\"error-----------------------------------------------------------------------------\") continue alpha = max(maxUtility, alpha) if alpha", "number_of_empty_cells*2 + + getMaxTile(mat) return score def monotonicity(grid): grid_mask = [[2048, 1024, 256,", "out to allow us to run your # code easily while grading your", "(mat,done, score) def empty_cells(mat): \"\"\" Return a list of empty cells. \"\"\" emptySquareList", "only one condition correct # 2 marks for getting two of the three", "to note: # Must ensure that it is created on a zero entry", "calculatedValue = expectimax(newBoard, depth - 1, False) if calculatedValue > currentValue: currentValue =", "a zero entry # 1 mark for creating the correct loop def new_tile(mat):", "for j in range(3): if mat[i][j]==mat[i][j+1] and mat[i][j]!=0: score += mat[i][j] * 2" ]
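# Illustrative usage sketch (not part of the original solution file). It shows
# one way the functions above could be wired together: start a 4x4 board with
# two tiles, then repeatedly play the direction whose resulting board has the
# highest expectimax value. The function name demo_expectimax_game and the
# depth/move limits are assumptions added here for demonstration only.
def demo_expectimax_game(depth=2, max_moves=1000):
    board = new_tile(new_tile(new_game(4)))
    for _ in range(max_moves):
        if game_state(board) in ('win', 'lose'):
            break
        best_move, best_value = None, -float('inf')
        for direction in ['up', 'down', 'left', 'right']:
            candidate, moved, _ = move(deepcopy(board), direction)
            if not moved:
                continue
            # after the player moves, a random tile appears -> chance node
            value = expectimax(candidate, depth - 1, False)
            if value > best_value:
                best_move, best_value = direction, value
        if best_move is None:
            break
        board, _, _ = move(board, best_move)
        board = new_tile(board)
    return board, game_state(board)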
import os
import logging
import sys
from collections import OrderedDict, defaultdict
import datetime
import cartosql
import requests
import json

# Constants
LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}'

CARTO_TABLE = 'soc_038_monthly_asylum_requests'
CARTO_SCHEMA = OrderedDict([
    ('_UID', 'text'),
    ('date', 'timestamp'),
    ('country', 'text'),
    ('value_type', 'text'),
    ('num_people', 'numeric'),
    ('some_stats_confidential', 'text')
])
UID_FIELD = '_UID'
TIME_FIELD = 'date'
DATA_DIR = 'data'
LOG_LEVEL = logging.INFO
DATE_FORMAT = '%Y-%m-%d'
CLEAR_TABLE_FIRST = False

# Limit 1M rows, drop older than 20yrs
MAXROWS = 1000000
MAXAGE = datetime.datetime.today().year - 20
DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede'

def lastUpdateDate(dataset, date):
    '''Report the last-updated date of the dataset to the Resource Watch API'''
    apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': os.getenv('apiToken')
    }
    body = {
        "dataLastUpdated": date.isoformat()
    }
    try:
        r = requests.patch(url=apiUrl, json=body, headers=headers)
        logging.info('[lastUpdated]: SUCCESS, ' + date.isoformat() + ' status code ' + str(r.status_code))
        return 0
    except Exception as e:
        logging.error('[lastUpdated]: ' + str(e))

def genUID(date, country, valuetype):
    '''Generate unique id'''
    return '{}_{}_{}'.format(country, date, valuetype)

def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows,
                unknown_vals, date_format=DATE_FORMAT):
    '''Loop over months in the data, add to new rows if new'''
    last_day = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    for cntry in data:
        for month, val in data[cntry].items():
            date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format)
            UID = genUID(date, cntry, valuetype)
            if UID not in existing_ids + new_ids:
                new_ids.append(UID)
                if month in unknown_vals[cntry]:
                    logging.debug('Some stats confidential for {} in {}-{}'.format(cntry, year, month))
                    values = [UID, date, cntry, valuetype, val, True]
                else:
                    logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month))
                    values = [UID, date, cntry, valuetype, val, False]
                new_rows.append(values)

def processNewData(existing_ids):
    '''
    Iteratively fetch, parse and post new data
    '''
    year = datetime.datetime.today().year
    new_count = 1
    new_ids = []
    try:
        while year > MAXAGE and new_count:
            # get and parse each page; stop when no new results or 200 pages
            # 1. Fetch new data
            logging.info("Fetching data for year {}".format(year))
            r = requests.get(LATEST_URL.format(year=year))
            data = r.json()
            logging.debug('data: {}'.format(data))

            # 2. Collect totals per country of origin and country of asylum
            origins = defaultdict(lambda: defaultdict(int))
            asylums = defaultdict(lambda: defaultdict(int))
            unknown_vals_origins = defaultdict(list)
            unknown_vals_asylums = defaultdict(list)
            for obs in data:
                try:
                    origins[obs['country_of_origin']][obs['month']] += obs['value']
                except Exception as e:
                    logging.debug("Error processing value {} for country of origin {} in {}-{}. Value set to -9999. Error: {}".format(obs['value'], obs['country_of_origin'], year, obs['month'], e))
                    unknown_vals_origins[obs['country_of_origin']].append(obs['month'])
                    origins[obs['country_of_origin']][obs['month']] += 0
                try:
                    asylums[obs['country_of_asylum']][obs['month']] += obs['value']
                except Exception as e:
                    logging.debug("Error processing value {} for country of asylum {} in {}-{}. Value set to -9999. Error: {}".format(obs['value'], obs['country_of_asylum'], year, obs['month'], e))
                    unknown_vals_asylums[obs['country_of_asylum']].append(obs['month'])
                    asylums[obs['country_of_asylum']][obs['month']] += 0

            # 3. Create unique IDs, create new rows
            new_rows = []

            logging.debug('Create data about places of origin for year {}'.format(year))
            insert_kwargs = {
                'data': origins, 'year': year, 'valuetype': 'country_of_origin',
                'existing_ids': existing_ids, 'new_ids': new_ids, 'new_rows': new_rows,
                'unknown_vals': unknown_vals_origins
            }
            insertIfNew(**insert_kwargs)

            logging.debug('Create data about places of asylum for year {}'.format(year))
            insert_kwargs.update(data=asylums, valuetype='country_of_asylum',
                                 unknown_vals=unknown_vals_asylums)
            insertIfNew(**insert_kwargs)

            # 4. Insert new rows
            new_count = len(new_rows)
            if new_count:
                logging.info('Pushing {} new rows'.format(new_count))
                cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows)

            # Decrement year
            year -= 1
    except json.decoder.JSONDecodeError:
        logging.info('API is still down.')

    num_new = len(new_ids)
    return num_new

##############################################################
# General logic for Carto
# should be the same for most tabular datasets
##############################################################

def createTableWithIndex(table, schema, id_field, time_field=''):
    '''Get existing ids or create table'''
    cartosql.createTable(table, schema)
    cartosql.createIndex(table, id_field, unique=True)
    if time_field:
        cartosql.createIndex(table, time_field)

def getIds(table, id_field):
    '''get ids from table'''
    r = cartosql.getFields(id_field, table, f='csv')
    return r.text.split('\r\n')[1:-1]

def deleteExcessRows(table, max_rows, time_field, max_age=''):
    '''Delete excess rows by age or count'''
    num_dropped = 0
    if isinstance(max_age, datetime.datetime):
        max_age = max_age.isoformat()

    # 1. delete by age
    if max_age:
        r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
        num_dropped = r.json()['total_rows']

    # 2. get sorted ids (old->new)
    r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv')
    ids = r.text.split('\r\n')[1:-1]

    # 3. delete excess
    if len(ids) > max_rows:
        r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
        num_dropped += r.json()['total_rows']
    if num_dropped:
        logging.info('Dropped {} old rows from {}'.format(num_dropped, table))

def get_most_recent_date(table):
    r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True)
    dates = r.text.split('\r\n')[1:-1]
    dates.sort()
    most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
    return most_recent_date

def main():
    logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
    logging.info('STARTING')

    if CLEAR_TABLE_FIRST:
        logging.info('Clearing table')
        cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL',
                            user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY'))

    # 1. Check if table exists and create table
    existing_ids = []
    if cartosql.tableExists(CARTO_TABLE):
        logging.info('Fetching existing ids')
        existing_ids = getIds(CARTO_TABLE, UID_FIELD)
    else:
        logging.info('Table {} does not exist, creating'.format(CARTO_TABLE))
        createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)

    # 2. Iteratively fetch, parse and post new data
    num_new = processNewData(existing_ids)

    existing_count = num_new + len(existing_ids)
    logging.info('Total rows: {}, New: {}, Max: {}'.format(
        existing_count, num_new, MAXROWS))

    # 3. Remove old observations
    deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1))

    # Get most recent update date
    most_recent_date = get_most_recent_date(CARTO_TABLE)
    lastUpdateDate(DATASET_ID, most_recent_date)

    logging.info('SUCCESS')
Collect Totals origins = defaultdict(lambda: defaultdict(int))", "Check if table exists and create table existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching", "CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year year -= 1 except json.decoder.JSONDecodeError: logging.info('API is", "or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat() # 1.", "createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. Iterively fetch, parse and post new data", "r.json() logging.debug('data: {}'.format(data)) # 2. Collect Totals origins = defaultdict(lambda: defaultdict(int)) asylums =", "[31,28,31,30,31,30,31,31,30,31,30,31] for cntry in data: for month, val in data[cntry].items(): date = datetime.datetime(year=year,", "rows new_count = len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(),", "# General logic for Carto # should be the same for most tabular", "country of asylum {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e)) unknown_vals_asylums[obs['country_of_asylum']].append(obs['month'])", "table, f='csv') return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field, max_age=''): '''Delete excess rows by", "r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field, max_age=''): '''Delete excess rows by age or count'''", "count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat() # 1. delete", "LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests' CARTO_SCHEMA = OrderedDict([ ('_UID', 'text'), ('date', 'timestamp'),", "def get_most_recent_date(table): r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True) dates = r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date", "for country of asylum {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e))", "import requests import json # Constants LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests' CARTO_SCHEMA", "'''Loop over months in the data, add to new rows if new''' last_day", "r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. delete excess", "Max: {}'.format( existing_count, num_new, MAXROWS)) # 3. Remove old observations deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD,", "origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing value {} for country", "= getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD,", "{} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0", "'{}'\".format(time_field, max_age)) num_dropped = r.json()['total_rows'] # 2. get sorted ids (old->new) r =", "0 # 3. Create Unique IDs, create new rows new_rows = [] logging.debug('Create", "{}, Max: {}'.format( existing_count, num_new, MAXROWS)) # 3. 
Remove old observations deleteExcessRows(CARTO_TABLE, MAXROWS,", "if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat() # 1. delete by age if max_age:", "'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests' CARTO_SCHEMA = OrderedDict([ ('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'),", "Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e)) unknown_vals_asylums[obs['country_of_asylum']].append(obs['month']) asylums[obs['country_of_asylum']][obs['month']] += 0 # 3. Create", "values = [UID, date, cntry, valuetype, val, False] new_rows.append(values) def processNewData(existing_ids): ''' Iterively", "processNewData(existing_ids) existing_count = num_new + len(existing_ids) logging.info('Total rows: {}, New: {}, Max: {}'.format(", "confidental for {} in {}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype,", "num_new = len(new_ids) return num_new ############################################################## # General logic for Carto # should", "'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = { 'Content-Type': 'application/json', 'Authorization': os.getenv('apiToken') } body = { \"dataLastUpdated\":", "for obs in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error", "main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL',", "existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else:", "num_dropped: logging.info('Dropped {} old rows from {}'.format(num_dropped, table)) def get_most_recent_date(table): r = cartosql.getFields(TIME_FIELD,", "else: logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month)) values =", "if time_field: cartosql.createIndex(table, time_field) def getIds(table, id_field): '''get ids from table''' r =", "from collections import OrderedDict, defaultdict import datetime import cartosql import requests import json", "= datetime.datetime.today().year new_count = 1 new_ids = [] try: while year > MAXAGE", "existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA,", "valuetype, val, False] new_rows.append(values) def processNewData(existing_ids): ''' Iterively fetch parse and post new", "new_count = len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows)", "# Limit 1M rows, drop older than 20yrs MAXROWS = 1000000 MAXAGE =", "data about places of asylum for year {}'.format(year)) insert_kwargs.update(data=asylums, valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) #", "the data, add to new rows if new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for cntry", "# 2. 
Collect Totals origins = defaultdict(lambda: defaultdict(int)) asylums = defaultdict(lambda: defaultdict(int)) unknown_vals_origins", "= defaultdict(list) for obs in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as", "and create table existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids =", "= False # Limit 1M rows, drop older than 20yrs MAXROWS = 1000000", "num_new + len(existing_ids) logging.info('Total rows: {}, New: {}, Max: {}'.format( existing_count, num_new, MAXROWS))", "('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ]) UID_FIELD = '_UID' TIME_FIELD", "= body, headers = headers) logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))", "[UID, date, cntry, valuetype, val, True] else: logging.debug('All known stats released for {}", "UID_FIELD) else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) #", "datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format) UID = genUID(date, cntry, valuetype) if UID not in existing_ids", "cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if table exists", "for month, val in data[cntry].items(): date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format) UID = genUID(date,", "num_new ############################################################## # General logic for Carto # should be the same for", "set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value']", "ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE,", "valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the data, add", "new_ids.append(UID) if month in unknown_vals[cntry]: logging.debug('Some stats confidental for {} in {}-{}'.format(cntry, year,", "if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year", "known stats released for {} in {}-{}'.format(cntry, year, month)) values = [UID, date,", "'Authorization': os.getenv('apiToken') } body = { \"dataLastUpdated\": date.isoformat() } try: r = requests.patch(url", "+ len(existing_ids) logging.info('Total rows: {}, New: {}, Max: {}'.format( existing_count, num_new, MAXROWS)) #", "table exists and create table existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids')", "DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False #", "# 1. Check if table exists and create table existing_ids = [] if", "4. Insert new rows new_count = len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count))", "unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. 
Insert new rows new_count = len(new_rows) if new_count: logging.info('Pushing", "as e: logging.debug(\"Error processing value {} for country of asylum {} in {}-{}.", "asylum {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e)) unknown_vals_asylums[obs['country_of_asylum']].append(obs['month']) asylums[obs['country_of_asylum']][obs['month']] +=", "def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in", "rows by age or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age =", "while year > MAXAGE and new_count: # get and parse each page; stop", "num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped {} old rows from {}'.format(num_dropped, table)) def", "in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing value", "data, add to new rows if new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for cntry in", "False] new_rows.append(values) def processNewData(existing_ids): ''' Iterively fetch parse and post new data '''", "Decrement year year -= 1 except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new =", "{}'.format(year)) insert_kwargs.update(data=asylums, valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. Insert new rows new_count = len(new_rows)", "processing value {} for country of origin {} in {}-{}. Value set to", "and post new data num_new = processNewData(existing_ids) existing_count = num_new + len(existing_ids) logging.info('Total", "logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year year -=", "country of origin {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month'])", "cartosql.getFields(TIME_FIELD, table, f='csv', post=True) dates = r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')", "getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)", "Exception as e: logging.error('[lastUpdated]: '+str(e)) def genUID(date, country, valuetype): '''Generate unique id''' return", "-= 1 except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new = len(new_ids) return num_new", "+= r.json()['total_rows'] if num_dropped: logging.info('Dropped {} old rows from {}'.format(num_dropped, table)) def get_most_recent_date(table):", "{ 'Content-Type': 'application/json', 'Authorization': os.getenv('apiToken') } body = { \"dataLastUpdated\": date.isoformat() } try:", "{}'.format( existing_count, num_new, MAXROWS)) # 3. 
Remove old observations deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE,", "rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year year -= 1 except json.decoder.JSONDecodeError:", "released for {} in {}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype,", "('date', 'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ]) UID_FIELD =", "cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped {} old rows from {}'.format(num_dropped,", "get_most_recent_date(table): r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True) dates = r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date =", "date, valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over", "datetime.datetime.today().year new_count = 1 new_ids = [] try: while year > MAXAGE and", "{}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype, val, False] new_rows.append(values) def", "max_age = max_age.isoformat() # 1. delete by age if max_age: r = cartosql.deleteRows(table,", "num_new, MAXROWS)) # 3. Remove old observations deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1))", "3. Remove old observations deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1)) # Get most", "r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped {} old rows", "= requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2. Collect Totals origins =", "of origin {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']]", "# should be the same for most tabular datasets ############################################################## def createTableWithIndex(table, schema,", "create table existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE,", "= { 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs) logging.debug('Create data about places of asylum", "insert_kwargs.update(data=asylums, valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. Insert new rows new_count = len(new_rows) if", "cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. 
delete excess if len(ids)", "if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does", "= datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST:", "import sys from collections import OrderedDict, defaultdict import datetime import cartosql import requests", "time_field) def getIds(table, id_field): '''get ids from table''' r = cartosql.getFields(id_field, table, f='csv')", "order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. delete excess if len(ids) > max_rows:", "else: logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2.", "'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False # Limit 1M", "data about places of origin for year {}'.format(year)) insert_kwargs = { 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows,", "} insertIfNew(**insert_kwargs) logging.debug('Create data about places of asylum for year {}'.format(year)) insert_kwargs.update(data=asylums, valuetype='country_of_asylum',", "3. delete excess if len(ids) > max_rows: r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped +=", "'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ]) UID_FIELD = '_UID' TIME_FIELD =", "'soc_038_monthly_asylum_requests' CARTO_SCHEMA = OrderedDict([ ('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people',", "places of origin for year {}'.format(year)) insert_kwargs = { 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins }", "for cntry in data: for month, val in data[cntry].items(): date = datetime.datetime(year=year, month=month,", "year -= 1 except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new = len(new_ids) return", "collections import OrderedDict, defaultdict import datetime import cartosql import requests import json #", "num_dropped = r.json()['total_rows'] # 2. get sorted ids (old->new) r = cartosql.getFields('cartodb_id', table,", "when no new results or 200 pages # 1. Fetch new data logging.info(\"Fetching", "date.isoformat() +' status code '+str(r.status_code)) return 0 except Exception as e: logging.error('[lastUpdated]: '+str(e))", "excess rows by age or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age", "{} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. 
Iterively fetch,", "'''Get existing ids or create table''' cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if time_field:", "LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False # Limit 1M rows,", "from table''' r = cartosql.getFields(id_field, table, f='csv') return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field,", "by age or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat()", "existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the data, add to", "month in unknown_vals[cntry]: logging.debug('Some stats confidental for {} in {}-{}'.format(cntry, year, month)) values", "= len(new_ids) return num_new ############################################################## # General logic for Carto # should be", "cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year year -= 1 except json.decoder.JSONDecodeError: logging.info('API", "+ new_ids: new_ids.append(UID) if month in unknown_vals[cntry]: logging.debug('Some stats confidental for {} in", "('some_stats_confidential', 'text') ]) UID_FIELD = '_UID' TIME_FIELD = 'date' DATA_DIR = 'data' LOG_LEVEL", "time_field, max_age=''): '''Delete excess rows by age or count''' num_dropped = 0 if", "same for most tabular datasets ############################################################## def createTableWithIndex(table, schema, id_field, time_field=''): '''Get existing", "('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ])", "datasets ############################################################## def createTableWithIndex(table, schema, id_field, time_field=''): '''Get existing ids or create table'''", "Iterively fetch, parse and post new data num_new = processNewData(existing_ids) existing_count = num_new", "cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not", "('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ]) UID_FIELD = '_UID' TIME_FIELD = 'date'", "level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY'))", "cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if time_field: cartosql.createIndex(table, time_field) def getIds(table, id_field): '''get", "date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = { 'Content-Type': 'application/json', 'Authorization': os.getenv('apiToken') } body", "stop when no new results or 200 pages # 1. 
Fetch new data", "Insert new rows new_count = len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE,", "origins = defaultdict(lambda: defaultdict(int)) asylums = defaultdict(lambda: defaultdict(int)) unknown_vals_origins = defaultdict(list) unknown_vals_asylums =", "as e: logging.debug(\"Error processing value {} for country of origin {} in {}-{}.", "new_rows = [] logging.debug('Create data about places of origin for year {}'.format(year)) insert_kwargs", "val, False] new_rows.append(values) def processNewData(existing_ids): ''' Iterively fetch parse and post new data", "deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1)) # Get most recent update date most_recent_date", "datetime.datetime): max_age = max_age.isoformat() # 1. delete by age if max_age: r =", "create new rows new_rows = [] logging.debug('Create data about places of origin for", "logging import sys from collections import OrderedDict, defaultdict import datetime import cartosql import", "''' year = datetime.datetime.today().year new_count = 1 new_ids = [] try: while year", "value {} for country of asylum {} in {}-{}. Value set to -9999.", "post new data ''' year = datetime.datetime.today().year new_count = 1 new_ids = []", "1. delete by age if max_age: r = cartosql.deleteRows(table, \"{} < '{}'\".format(time_field, max_age))", "os.getenv('apiToken') } body = { \"dataLastUpdated\": date.isoformat() } try: r = requests.patch(url =", "# 3. delete excess if len(ids) > max_rows: r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped", "= num_new + len(existing_ids) logging.info('Total rows: {}, New: {}, Max: {}'.format( existing_count, num_new,", "for year {}\".format(year)) r = requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2.", "month)) values = [UID, date, cntry, valuetype, val, False] new_rows.append(values) def processNewData(existing_ids): '''", "new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) # Decrement year year", "'''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids,", "new data ''' year = datetime.datetime.today().year new_count = 1 new_ids = [] try:", "= 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = { 'Content-Type': 'application/json',", "new_ids: new_ids.append(UID) if month in unknown_vals[cntry]: logging.debug('Some stats confidental for {} in {}-{}'.format(cntry,", "year year -= 1 except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new = len(new_ids)", "year, month)) values = [UID, date, cntry, valuetype, val, False] new_rows.append(values) def processNewData(existing_ids):", "- 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers =", "'+str(r.status_code)) return 0 except Exception as e: logging.error('[lastUpdated]: '+str(e)) def genUID(date, country, valuetype):", "origin {} in {}-{}. Value set to -9999. 
Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] +=", "year > MAXAGE and new_count: # get and parse each page; stop when", "'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs) logging.debug('Create data about places of asylum for year {}'.format(year))", "tabular datasets ############################################################## def createTableWithIndex(table, schema, id_field, time_field=''): '''Get existing ids or create", "'%Y-%m-%d' CLEAR_TABLE_FIRST = False # Limit 1M rows, drop older than 20yrs MAXROWS", "stats released for {} in {}-{}'.format(cntry, year, month)) values = [UID, date, cntry,", "asylums[obs['country_of_asylum']][obs['month']] += 0 # 3. Create Unique IDs, create new rows new_rows =", "new rows new_count = len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(),", "+= 0 # 3. Create Unique IDs, create new rows new_rows = []", "'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if table exists and", "num_new = processNewData(existing_ids) existing_count = num_new + len(existing_ids) logging.info('Total rows: {}, New: {},", "r = cartosql.getFields(id_field, table, f='csv') return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field, max_age=''): '''Delete", "def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT", "{}\".format(year)) r = requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2. Collect Totals", "3. Create Unique IDs, create new rows new_rows = [] logging.debug('Create data about", "'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs) logging.debug('Create data about places of asylum for year", "r = cartosql.deleteRows(table, \"{} < '{}'\".format(time_field, max_age)) num_dropped = r.json()['total_rows'] # 2. get", "existing ids or create table''' cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if time_field: cartosql.createIndex(table,", "unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error", "rows: {}, New: {}, Max: {}'.format( existing_count, num_new, MAXROWS)) # 3. Remove old", "-9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e)) unknown_vals_asylums[obs['country_of_asylum']].append(obs['month']) asylums[obs['country_of_asylum']][obs['month']] += 0 # 3. 
Create Unique IDs, create", "= cartosql.getFields(id_field, table, f='csv') return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field, max_age=''): '''Delete excess", "dates.sort() most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING')", "try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing value {} for", "logging.info('Table {} does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. Iterively", "r = requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2. Collect Totals origins", "'date' DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False", "MAXAGE = datetime.datetime.today().year - 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl =", "new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the data, add to new rows", "should be the same for most tabular datasets ############################################################## def createTableWithIndex(table, schema, id_field,", "CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. Iterively fetch, parse and post new data num_new", "to new rows if new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for cntry in data: for", "'_UID' TIME_FIELD = 'date' DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d'", "get sorted ids (old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1]", "parse and post new data ''' year = datetime.datetime.today().year new_count = 1 new_ids", "does not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. Iterively fetch, parse", "datetime.datetime.today().year - 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers", "cntry, valuetype, val, True] else: logging.debug('All known stats released for {} in {}-{}'.format(cntry,", "def getIds(table, id_field): '''get ids from table''' r = cartosql.getFields(id_field, table, f='csv') return", "1M rows, drop older than 20yrs MAXROWS = 1000000 MAXAGE = datetime.datetime.today().year -", "get and parse each page; stop when no new results or 200 pages", "UID_FIELD, TIME_FIELD) # 2. 
Iterively fetch, parse and post new data num_new =", "unique=True) if time_field: cartosql.createIndex(table, time_field) def getIds(table, id_field): '''get ids from table''' r", "= apiUrl, json = body, headers = headers) logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +'", "Limit 1M rows, drop older than 20yrs MAXROWS = 1000000 MAXAGE = datetime.datetime.today().year", "def createTableWithIndex(table, schema, id_field, time_field=''): '''Get existing ids or create table''' cartosql.createTable(table, schema)", "logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month)) values = [UID,", "{ 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs) logging.debug('Create data about places of asylum for", "in existing_ids + new_ids: new_ids.append(UID) if month in unknown_vals[cntry]: logging.debug('Some stats confidental for", "= [] try: while year > MAXAGE and new_count: # get and parse", "most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if", "genUID(date, country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year,", "TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1)) # Get most recent update date most_recent_date = get_most_recent_date(CARTO_TABLE)", "code '+str(r.status_code)) return 0 except Exception as e: logging.error('[lastUpdated]: '+str(e)) def genUID(date, country,", "valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. Insert new rows new_count = len(new_rows) if new_count:", "age or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat() #", "logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if", "in the data, add to new rows if new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for", "value {} for country of origin {} in {}-{}. Value set to -9999.", "= headers) logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code)) return 0 except", "-9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception", "existing_count = num_new + len(existing_ids) logging.info('Total rows: {}, New: {}, Max: {}'.format( existing_count,", "2. Iterively fetch, parse and post new data num_new = processNewData(existing_ids) existing_count =", "headers = { 'Content-Type': 'application/json', 'Authorization': os.getenv('apiToken') } body = { \"dataLastUpdated\": date.isoformat()", "not exist, creating'.format(CARTO_TABLE)) createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD) # 2. 
Iterively fetch, parse and", "defaultdict(list) for obs in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e:", "of origin for year {}'.format(year)) insert_kwargs = { 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs)", "unknown_vals_origins = defaultdict(list) unknown_vals_asylums = defaultdict(list) for obs in data: try: origins[obs['country_of_origin']][obs['month']] +=", "20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = {", "getIds(table, id_field): '''get ids from table''' r = cartosql.getFields(id_field, table, f='csv') return r.text.split('\\r\\n')[1:-1]", "'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text') ]) UID_FIELD = '_UID'", "return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows, time_field, max_age=''): '''Delete excess rows by age or", "new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the data, add to new", "year = datetime.datetime.today().year new_count = 1 new_ids = [] try: while year >", "(old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. delete", "cntry in data: for month, val in data[cntry].items(): date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format)", "Unique IDs, create new rows new_rows = [] logging.debug('Create data about places of", "asylums = defaultdict(lambda: defaultdict(int)) unknown_vals_origins = defaultdict(list) unknown_vals_asylums = defaultdict(list) for obs in", "new_count = 1 new_ids = [] try: while year > MAXAGE and new_count:", "{} for country of origin {} in {}-{}. Value set to -9999. Error:", "############################################################## # General logic for Carto # should be the same for most", "asylum for year {}'.format(year)) insert_kwargs.update(data=asylums, valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. Insert new rows", "= len(new_rows) if new_count: logging.info('Pushing {} new rows'.format(new_count)) cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_rows) #", "2. 
get sorted ids (old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids =", "createTableWithIndex(table, schema, id_field, time_field=''): '''Get existing ids or create table''' cartosql.createTable(table, schema) cartosql.createIndex(table,", "import cartosql import requests import json # Constants LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE =", "TIME_FIELD = 'date' DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST", "'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = { 'Content-Type': 'application/json', 'Authorization':", "is still down.') num_new = len(new_ids) return num_new ############################################################## # General logic for", "defaultdict(int)) asylums = defaultdict(lambda: defaultdict(int)) unknown_vals_origins = defaultdict(list) unknown_vals_asylums = defaultdict(list) for obs", "cartosql.createIndex(table, time_field) def getIds(table, id_field): '''get ids from table''' r = cartosql.getFields(id_field, table,", "# Decrement year year -= 1 except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new", "genUID(date, cntry, valuetype) if UID not in existing_ids + new_ids: new_ids.append(UID) if month", "insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the", "try: while year > MAXAGE and new_count: # get and parse each page;", "of asylum for year {}'.format(year)) insert_kwargs.update(data=asylums, valuetype='country_of_asylum', unknown_vals=unknown_vals_asylums) insertIfNew(**insert_kwargs) # 4. Insert new", "val, True] else: logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month))", "# 2. get sorted ids (old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids", "r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)", "from {}'.format(num_dropped, table)) def get_most_recent_date(table): r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True) dates =", "data[cntry].items(): date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format) UID = genUID(date, cntry, valuetype) if UID", "0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing value {}", "table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if table", "= r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr,", "key=os.getenv('CARTO_KEY')) # 1. Check if table exists and create table existing_ids = []", "obs['value'] except Exception as e: logging.debug(\"Error processing value {} for country of asylum", "requests import json # Constants LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests' CARTO_SCHEMA =", "f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. 
delete excess if len(ids) > max_rows: r", "IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if table exists and create", "unknown_vals, date_format=DATE_FORMAT): '''Loop over months in the data, add to new rows if", "logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {} does not exist,", "'''Delete excess rows by age or count''' num_dropped = 0 if isinstance(max_age, datetime.datetime):", "observations deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1)) # Get most recent update date", "if month in unknown_vals[cntry]: logging.debug('Some stats confidental for {} in {}-{}'.format(cntry, year, month))", "# get and parse each page; stop when no new results or 200", "New: {}, Max: {}'.format( existing_count, num_new, MAXROWS)) # 3. Remove old observations deleteExcessRows(CARTO_TABLE,", "unknown_vals[cntry]: logging.debug('Some stats confidental for {} in {}-{}'.format(cntry, year, month)) values = [UID,", "for country of origin {} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e))", "{} in {}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e)) unknown_vals_asylums[obs['country_of_asylum']].append(obs['month']) asylums[obs['country_of_asylum']][obs['month']] += 0", "age if max_age: r = cartosql.deleteRows(table, \"{} < '{}'\".format(time_field, max_age)) num_dropped = r.json()['total_rows']", "1 new_ids = [] try: while year > MAXAGE and new_count: # get", "valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop over months", "data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing value {}", "= [31,28,31,30,31,30,31,31,30,31,30,31] for cntry in data: for month, val in data[cntry].items(): date =", "General logic for Carto # should be the same for most tabular datasets", "country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year, valuetype,", "the same for most tabular datasets ############################################################## def createTableWithIndex(table, schema, id_field, time_field=''): '''Get", "len(ids) > max_rows: r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped", "'+str(e)) def genUID(date, country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype) def", "'text') ]) UID_FIELD = '_UID' TIME_FIELD = 'date' DATA_DIR = 'data' LOG_LEVEL =", "cartosql.deleteRows(table, \"{} < '{}'\".format(time_field, max_age)) num_dropped = r.json()['total_rows'] # 2. 
get sorted ids", "+= obs['value'] except Exception as e: logging.debug(\"Error processing value {} for country of", "create table''' cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if time_field: cartosql.createIndex(table, time_field) def getIds(table,", "%H:%M:%S') return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE,", "Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception as", "processNewData(existing_ids): ''' Iterively fetch parse and post new data ''' year = datetime.datetime.today().year", "month=month, day=last_day[month-1]).strftime(date_format) UID = genUID(date, cntry, valuetype) if UID not in existing_ids +", "year, month)) values = [UID, date, cntry, valuetype, val, True] else: logging.debug('All known", "logging.info('API is still down.') num_new = len(new_ids) return num_new ############################################################## # General logic", "month, val in data[cntry].items(): date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format) UID = genUID(date, cntry,", "defaultdict(int)) unknown_vals_origins = defaultdict(list) unknown_vals_asylums = defaultdict(list) for obs in data: try: origins[obs['country_of_origin']][obs['month']]", "except json.decoder.JSONDecodeError: logging.info('API is still down.') num_new = len(new_ids) return num_new ############################################################## #", "''' Iterively fetch parse and post new data ''' year = datetime.datetime.today().year new_count", "than 20yrs MAXROWS = 1000000 MAXAGE = datetime.datetime.today().year - 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede'", "requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2. Collect Totals origins = defaultdict(lambda:", "new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for cntry in data: for month, val in data[cntry].items():", "CARTO_SCHEMA = OrderedDict([ ('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'),", "new_count: # get and parse each page; stop when no new results or", "if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1.", "month)) values = [UID, date, cntry, valuetype, val, True] else: logging.debug('All known stats", "def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset) headers = { 'Content-Type': 'application/json', 'Authorization': os.getenv('apiToken')", "[UID, date, cntry, valuetype, val, False] new_rows.append(values) def processNewData(existing_ids): ''' Iterively fetch parse", "in data: for month, val in data[cntry].items(): date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format) UID", "NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. 
Check if table exists and create table existing_ids", "= 1000000 MAXAGE = datetime.datetime.today().year - 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date):", "'{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT): '''Loop", "still down.') num_new = len(new_ids) return num_new ############################################################## # General logic for Carto", "add to new rows if new''' last_day = [31,28,31,30,31,30,31,31,30,31,30,31] for cntry in data:", "results or 200 pages # 1. Fetch new data logging.info(\"Fetching data for year", "cntry, valuetype) if UID not in existing_ids + new_ids: new_ids.append(UID) if month in", "ids (old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3.", "def genUID(date, country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data,", "body = { \"dataLastUpdated\": date.isoformat() } try: r = requests.patch(url = apiUrl, json", "defaultdict(list) unknown_vals_asylums = defaultdict(list) for obs in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except", "= cartosql.deleteRows(table, \"{} < '{}'\".format(time_field, max_age)) num_dropped = r.json()['total_rows'] # 2. get sorted", "logging.error('[lastUpdated]: '+str(e)) def genUID(date, country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date, valuetype)", "'application/json', 'Authorization': os.getenv('apiToken') } body = { \"dataLastUpdated\": date.isoformat() } try: r =", "{}-{}. Value set to -9999. Error: {}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']]", "ids or create table''' cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if time_field: cartosql.createIndex(table, time_field)", "= 'date' DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST =", "= r.json()['total_rows'] # 2. get sorted ids (old->new) r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field),", "new results or 200 pages # 1. Fetch new data logging.info(\"Fetching data for", "new data num_new = processNewData(existing_ids) existing_count = num_new + len(existing_ids) logging.info('Total rows: {},", "= datetime.datetime.today().year - 20 DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede' def lastUpdateDate(dataset, date): apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)", "# 1. Fetch new data logging.info(\"Fetching data for year {}\".format(year)) r = requests.get(LATEST_URL.format(year=year))", "time_field=''): '''Get existing ids or create table''' cartosql.createTable(table, schema) cartosql.createIndex(table, id_field, unique=True) if", "ids[:-max_rows]) num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped {} old rows from {}'.format(num_dropped, table))", "1. Fetch new data logging.info(\"Fetching data for year {}\".format(year)) r = requests.get(LATEST_URL.format(year=year)) data", "{}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype, val, True] else: logging.debug('All", "page; stop when no new results or 200 pages # 1. 
Fetch new", "ids from table''' r = cartosql.getFields(id_field, table, f='csv') return r.text.split('\\r\\n')[1:-1] def deleteExcessRows(table, max_rows,", "# 3. Create Unique IDs, create new rows new_rows = [] logging.debug('Create data", "cartosql import requests import json # Constants LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests'", "table, order='{}'.format(time_field), f='csv') ids = r.text.split('\\r\\n')[1:-1] # 3. delete excess if len(ids) >", "return most_recent_date def main(): logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL) logging.info('STARTING') if CLEAR_TABLE_FIRST: logging.info('Clearing table') cartosql.deleteRows(CARTO_TABLE, 'cartodb_id", "data for year {}\".format(year)) r = requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) #", "DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False # Limit 1M rows, drop older than", "user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY')) # 1. Check if table exists and create table existing_ids =", "= max_age.isoformat() # 1. delete by age if max_age: r = cartosql.deleteRows(table, \"{}", "origin for year {}'.format(year)) insert_kwargs = { 'data':origins,'year':year,'valuetype':'country_of_origin', 'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows, 'unknown_vals':unknown_vals_origins } insertIfNew(**insert_kwargs) logging.debug('Create", "id''' return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals,", "older than 20yrs MAXROWS = 1000000 MAXAGE = datetime.datetime.today().year - 20 DATASET_ID =", "import logging import sys from collections import OrderedDict, defaultdict import datetime import cartosql", "delete excess if len(ids) > max_rows: r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped += r.json()['total_rows']", "{ \"dataLastUpdated\": date.isoformat() } try: r = requests.patch(url = apiUrl, json = body,", "headers) logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code)) return 0 except Exception", "valuetype, val, True] else: logging.debug('All known stats released for {} in {}-{}'.format(cntry, year,", "CLEAR_TABLE_FIRST = False # Limit 1M rows, drop older than 20yrs MAXROWS =", "data num_new = processNewData(existing_ids) existing_count = num_new + len(existing_ids) logging.info('Total rows: {}, New:", "= genUID(date, cntry, valuetype) if UID not in existing_ids + new_ids: new_ids.append(UID) if", "return '{}_{}_{}'.format(country, date, valuetype) def insertIfNew(data, year, valuetype, existing_ids, new_ids, new_rows, unknown_vals, date_format=DATE_FORMAT):", "if table exists and create table existing_ids = [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing", "200 pages # 1. Fetch new data logging.info(\"Fetching data for year {}\".format(year)) r", "values = [UID, date, cntry, valuetype, val, True] else: logging.debug('All known stats released", "requests.patch(url = apiUrl, json = body, headers = headers) logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat()", "for {} in {}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype, val,", "max_age)) num_dropped = r.json()['total_rows'] # 2. get sorted ids (old->new) r = cartosql.getFields('cartodb_id',", "TIME_FIELD) # 2. 
Iterively fetch, parse and post new data num_new = processNewData(existing_ids)", "\"dataLastUpdated\": date.isoformat() } try: r = requests.patch(url = apiUrl, json = body, headers", "False # Limit 1M rows, drop older than 20yrs MAXROWS = 1000000 MAXAGE", "table, f='csv', post=True) dates = r.text.split('\\r\\n')[1:-1] dates.sort() most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S') return", "= logging.INFO DATE_FORMAT = '%Y-%m-%d' CLEAR_TABLE_FIRST = False # Limit 1M rows, drop", "time_field: cartosql.createIndex(table, time_field) def getIds(table, id_field): '''get ids from table''' r = cartosql.getFields(id_field,", "obs in data: try: origins[obs['country_of_origin']][obs['month']] += obs['value'] except Exception as e: logging.debug(\"Error processing", "[] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table {}", "Exception as e: logging.debug(\"Error processing value {} for country of asylum {} in", "be the same for most tabular datasets ############################################################## def createTableWithIndex(table, schema, id_field, time_field=''):", "e: logging.error('[lastUpdated]: '+str(e)) def genUID(date, country, valuetype): '''Generate unique id''' return '{}_{}_{}'.format(country, date,", "parse each page; stop when no new results or 200 pages # 1.", "{}\".format(obs['value'],obs['country_of_origin'],year,obs['month'],e)) unknown_vals_origins[obs['country_of_origin']].append(obs['month']) origins[obs['country_of_origin']][obs['month']] += 0 try: asylums[obs['country_of_asylum']][obs['month']] += obs['value'] except Exception as e:", "> max_rows: r = cartosql.deleteRowsByIDs(table, ids[:-max_rows]) num_dropped += r.json()['total_rows'] if num_dropped: logging.info('Dropped {}", "in {}-{}'.format(cntry, year, month)) values = [UID, date, cntry, valuetype, val, True] else:", "if UID not in existing_ids + new_ids: new_ids.append(UID) if month in unknown_vals[cntry]: logging.debug('Some", "[] try: while year > MAXAGE and new_count: # get and parse each", "OrderedDict([ ('_UID', 'text'), ('date', 'timestamp'), ('country', 'text'), ('value_type', 'text'), ('num_people', 'numeric'), ('some_stats_confidential', 'text')", "2. Collect Totals origins = defaultdict(lambda: defaultdict(int)) asylums = defaultdict(lambda: defaultdict(int)) unknown_vals_origins =", "{} old rows from {}'.format(num_dropped, table)) def get_most_recent_date(table): r = cartosql.getFields(TIME_FIELD, table, f='csv',", "0 if isinstance(max_age, datetime.datetime): max_age = max_age.isoformat() # 1. delete by age if", "import json # Constants LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}' CARTO_TABLE = 'soc_038_monthly_asylum_requests' CARTO_SCHEMA = OrderedDict([", "= '_UID' TIME_FIELD = 'date' DATA_DIR = 'data' LOG_LEVEL = logging.INFO DATE_FORMAT =", "no new results or 200 pages # 1. Fetch new data logging.info(\"Fetching data", "new_ids = [] try: while year > MAXAGE and new_count: # get and", "year {}\".format(year)) r = requests.get(LATEST_URL.format(year=year)) data = r.json() logging.debug('data: {}'.format(data)) # 2. 
Collect", "logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code)) return 0 except Exception as", "valuetype) if UID not in existing_ids + new_ids: new_ids.append(UID) if month in unknown_vals[cntry]:", "= [] if cartosql.tableExists(CARTO_TABLE): logging.info('Fetching existing ids') existing_ids = getIds(CARTO_TABLE, UID_FIELD) else: logging.info('Table" ]
[ "for feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "language governing permissions and # limitations under the License. \"\"\"Tests for feedback-related jobs.\"\"\"", "License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL,", "self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries(", "Check that the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the", "this file except in compliance with the License. # You may obtain a", "thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check", "self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture',", "self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2,", "job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long", "under the License. \"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off", "feedback_jobs_one_off from core.domain import feedback_services from core.domain import subscription_services from core.platform import models", "ANY KIND, either express or implied. # See the License for the specific", "it and check the error # case. 
first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1],", "self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _", "feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint:", "\"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id)", "language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "def test_message_count(self): \"\"\"Test if the job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1,", "'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the", "core.platform import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services()", "self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration(", "= '<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id =", "self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id,", "returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here')", "used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids)", "message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None,", "self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'],", "OF ANY KIND, either express or implied. # See the License for the", "that the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first", "and # limitations under the License. 
\"\"\"Tests for feedback-related jobs.\"\"\" import ast from", "= ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check if", "thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = {", "# Check that the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get", "taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message counter", "job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name':", "u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL =", "Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the", "'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL", "self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture',", "core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for", "[ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the job", "feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check if the quantities", "we can delete it and check the error # case. first_message_model = (", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. #", "disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def test_message_count(self):", "only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message')", "test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off", "first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check", "thread_ids) # Check that the first message has two messages. 
self.assertEqual(thread_summaries[0]['total_message_count'], 2) #", "'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest,", "self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has only one", "the error # case. first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output", "self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>',", "the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check if the quantities have the", "models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message", "message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message", "self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first", "required by applicable law or agreed to in writing, software # distributed under", "'summary': None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME =", "\"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "one-off feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT", "applicable law or agreed to in writing, software # distributed under the License", "self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) #", "FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1'", "self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en')", "or agreed to in writing, software # distributed under the License is distributed", "core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain import subscription_services from core.platform", "language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint:", "u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self): 
super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp()", "that the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def", "'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "= self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England',", "# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved.", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "# # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed", "License. # You may obtain a copy of the License at # #", "super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration(", "MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS),", "# pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output", "self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id,", "self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ =", "= { 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject'", "compliance with the License. # You may obtain a copy of the License", "= subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that", "models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase):", "permissions and # limitations under the License. 
\"\"\"Tests for feedback-related jobs.\"\"\" import ast", "= 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not", "# pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = (", "self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges", "self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here')", "software # distributed under the License is distributed on an \"AS-IS\" BASIS, #", "subscription_services from core.platform import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services", "USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL,", "is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "distributed under the License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES", "not use this file except in compliance with the License. 
# You may", "the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual(", "self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in", "jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain", "the job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not", "License, Version 2.0 (the \"License\"); # you may not use this file except", "Get the first message so that we can delete it and check the", "import ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain import", "ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain import subscription_services", "counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open',", "self.user_id, thread_ids) # Check that the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'],", "check the error # case. first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete()", "self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries,", "None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user'", "the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread(", "title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self):", "# you may not use this file except in compliance with the License.", "2) # Get the first message so that we can delete it and", "agreed to in writing, software # distributed under the License is distributed on", "core.domain import feedback_services from core.domain import subscription_services from core.platform import models from core.tests", "1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _ =", "one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message has only one", "error # case. 
first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output =", "(the \"License\"); # you may not use this file except in compliance with", "(feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback", "= 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None,", "# Unless required by applicable law or agreed to in writing, software #", "first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so that we can delete it", "category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() #", "eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if", "feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has only one message.", "feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output =", "category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off", "self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check if the quantities have", "= feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output", "file except in compliance with the License. # You may obtain a copy", "# Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under", "= models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message counter job.\"\"\"", "from core.domain import feedback_jobs_one_off from core.domain import feedback_services from core.domain import subscription_services from", "for stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the job returns", "and check the error # case. 
first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0))", "License for the specific language governing permissions and # limitations under the License.", "for the one-off feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 =", "None, None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check", "to in writing, software # distributed under the License is distributed on an", "self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi')", "License. \"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from core.domain", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the", "setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)", "self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for", "'<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL)", "only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message has only", "self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id", "( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output]", "feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from core.domain import feedback_services from", "case. first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() #", "or implied. # See the License for the specific language governing permissions and", "from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests", "# Check if the quantities have the correct values. 
self.assertEqual(output[0][1]['message_count'], 1) self.assertEqual(output[0][1]['next_message_id'], 2)", "in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs", "feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries(", "} USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME)", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "that we can delete it and check the error # case. first_message_model =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id,", "an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids", "message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message has only one message.", "# See the License for the specific language governing permissions and # limitations", "from core.domain import feedback_services from core.domain import subscription_services from core.platform import models from", "self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id))", "specific language governing permissions and # limitations under the License. \"\"\"Tests for feedback-related", "the License. \"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from", "USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id", "correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2,", "the specific language governing permissions and # limitations under the License. \"\"\"Tests for", "two messages. 
self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so that we can", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "if the job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'],", "you may not use this file except in compliance with the License. #", "delete it and check the error # case. first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1,", "import feedback_services from core.domain import subscription_services from core.platform import models from core.tests import", "= [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the", "None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def setUp(self):", "stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item", "second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None,", "self.user_id, thread_ids) # Check that the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2)", "for the specific language governing permissions and # limitations under the License. \"\"\"Tests", "use this file except in compliance with the License. # You may obtain", "job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)", "self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _", "<filename>core/domain/feedback_jobs_one_off_test.py # coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights", "distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check that the second message has", "job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks()", "from core.domain import subscription_services from core.platform import models from core.tests import test_utils (feedback_models,)", "Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "message has two messages. 
self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so that", "first_message_model.delete() output = self._run_one_off_job() # Check if the quantities have the correct values.", "2.0 (the \"License\"); # you may not use this file except in compliance", "feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'],", "test_message_count(self): \"\"\"Test if the job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'],", "message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status':", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "_run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue(", "Check that the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) # Check", "1) # Check that the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1)", "\"\"\"Tests for the one-off feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2", "self._run_one_off_job() # Check if the quantities have the correct values. self.assertEqual(output[0][1]['message_count'], 1) self.assertEqual(output[0][1]['next_message_id'],", "EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username':", "# # Unless required by applicable law or agreed to in writing, software", "# distributed under the License is distributed on an \"AS-IS\" BASIS, # WITHOUT", "the first message has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message", "express or implied. # See the License for the specific language governing permissions", "core.domain import subscription_services from core.platform import models from core.tests import test_utils (feedback_models,) =", "stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the job returns the", "return eval_output def test_message_count(self): \"\"\"Test if the job returns the correct message count.\"\"\"", "self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id =", "either express or implied. # See the License for the specific language governing", "one message. 
self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job()", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the job returns the correct message", "pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output(", "governing permissions and # limitations under the License. \"\"\"Tests for feedback-related jobs.\"\"\" import", "the one-off feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2'", "1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item)", "self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\"", "Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version", "the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id,", "the License. # You may obtain a copy of the License at #", "_ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has two", "'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary': None,", "{ 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject' }", "in writing, software # distributed under the License is distributed on an \"AS-IS\"", "job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used", "has two messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so that we", "# case. first_message_model = ( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job()", "so that we can delete it and check the error # case. first_message_model", "subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the", "thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has", "thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids)", "with the License. 
# You may obtain a copy of the License at", "'original_author_username': None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME = 'user' def", "title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new()", "thread_ids) # Check that the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1)", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "the License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return eval_output def test_message_count(self): \"\"\"Test", "feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'],", "on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "# Get the first message so that we can delete it and check", "u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>' USER_USERNAME", "0)) first_message_model.delete() output = self._run_one_off_job() # Check if the quantities have the correct", "taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output =", "limitations under the License. \"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain import", "self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1,", "self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used", "used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to(", "has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor", "in stringified_output] return eval_output def test_message_count(self): \"\"\"Test if the job returns the correct", "under the License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR", "import subscription_services from core.platform import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback])", "can delete it and check the error # case. 
first_message_model = ( feedback_models.FeedbackMessageModel.get(", "import feedback_jobs_one_off from core.domain import feedback_services from core.domain import subscription_services from core.platform import", "'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id,", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "( feedback_models.FeedbackMessageModel.get( self.EXP_ID_1, thread_ids[0].split('.')[1], 0)) first_message_model.delete() output = self._run_one_off_job() # Check if the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output", "feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job()", "See the License for the specific language governing permissions and # limitations under", "import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "messages. self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so that we can delete", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # #", "message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'],", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue( taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1) self.process_and_flush_pending_tasks() stringified_output = ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( #", "one-off MapReduce job.\"\"\" job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue( job_id) self.assertEqual( self.count_jobs_in_taskqueue(", "None, 'editor message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that", "first message has two messages. 
self.assertEqual(thread_summaries[0]['total_message_count'], 2) # Get the first message so", "'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a subject' } USER_EMAIL = '<EMAIL>'", "first message so that we can delete it and check the error #", "self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message", "feedback thread message counter job.\"\"\" EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT =", "class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message counter job.\"\"\" EXP_ID_1 =", "# Check that the first message has only one message. self.assertEqual(thread_summaries[0]['total_message_count'], 1) #", "= ( feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in", "that the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1],", "# Check that the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message(", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "# limitations under the License. \"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain", "'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject':", "writing, software # distributed under the License is distributed on an \"AS-IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long job_id)) eval_output = [ast.literal_eval(stringified_item) for stringified_item in stringified_output] return", "The Oppia Authors. All Rights Reserved. 
# # Licensed under the Apache License,", "EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary': None, 'original_author_username': None, 'subject': u'a", "from core.platform import models from core.tests import test_utils (feedback_models,) = models.Registry.import_models([models.NAMES.feedback]) taskqueue_services =", "= models.Registry.import_models([models.NAMES.feedback]) taskqueue_services = models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread", "eval_output def test_message_count(self): \"\"\"Test if the job returns the correct message count.\"\"\" feedback_services.create_thread(", "here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id) self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) #", "def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id =", "message') self._run_one_off_job() thread_summaries, _ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first", "= 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name', 'summary':", "= self._run_one_off_job() # Check if the quantities have the correct values. self.assertEqual(output[0][1]['message_count'], 1)", "Check that the second message has only one message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1,", "here') feedback_services.create_thread( self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'], self.user_id, self.EXPECTED_THREAD_DICT['subject'], 'not used here') thread_ids = subscription_services.get_all_threads_subscribed_to( self.user_id)", "_ = feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has only", "feedback_services from core.domain import subscription_services from core.platform import models from core.tests import test_utils", "England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id, title='<NAME>', category='Architecture', language_code='fi') def _run_one_off_job(self): \"\"\"Runs the", "message so that we can delete it and check the error # case.", "message. self.assertEqual(thread_summaries[1]['total_message_count'], 1) feedback_services.create_message( self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None, None, 'editor message') self._run_one_off_job() thread_summaries,", "'user' def setUp(self): super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp() self.signup(self.USER_EMAIL, self.USER_USERNAME) self.user_id = self.get_user_id_from_email(self.USER_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id", "\"\"\"Test if the job returns the correct message count.\"\"\" feedback_services.create_thread( self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'], self.user_id,", "2014 The Oppia Authors. All Rights Reserved. 
# # Licensed under the Apache", "= feedback_services.get_thread_summaries( self.user_id, thread_ids) # Check that the first message has two messages.", "= self.get_user_id_from_email(self.OWNER_EMAIL) self.save_new_valid_exploration( self.EXP_ID_1, self.owner_id, title='Bridges in England', category='Architecture', language_code='en') self.save_new_valid_exploration( self.EXP_ID_2, self.owner_id,", "EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXPECTED_THREAD_DICT = { 'status': u'open', 'state_name': u'a_state_name',", "models.Registry.import_taskqueue_services() class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase): \"\"\"Tests for the one-off feedback thread message counter job.\"\"\" EXP_ID_1", "the first message so that we can delete it and check the error", "output = self._run_one_off_job() # Check if the quantities have the correct values. self.assertEqual(output[0][1]['message_count'],", "\"\"\"Tests for feedback-related jobs.\"\"\" import ast from core.domain import feedback_jobs_one_off from core.domain import" ]
[ "# https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/ # https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls = (", "import web urls = ( '/', 'index' ) class index: def GET(self): return", "https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls = ( '/', 'index' )", "= ( '/', 'index' ) class index: def GET(self): return \"Hello, world!\" if", "'index' ) class index: def GET(self): return \"Hello, world!\" if __name__ == \"__main__\":", "urls = ( '/', 'index' ) class index: def GET(self): return \"Hello, world!\"", "class index: def GET(self): return \"Hello, world!\" if __name__ == \"__main__\": app =", "https://webpy.org/docs/0.3/tutorial # https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/ # https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls =", "https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/ # https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls = ( '/',", "server.py 1234 import web urls = ( '/', 'index' ) class index: def", "1234 import web urls = ( '/', 'index' ) class index: def GET(self):", "python server.py 1234 import web urls = ( '/', 'index' ) class index:", "GET(self): return \"Hello, world!\" if __name__ == \"__main__\": app = web.application(urls, globals()) app.run()", "'/', 'index' ) class index: def GET(self): return \"Hello, world!\" if __name__ ==", "# python server.py 1234 import web urls = ( '/', 'index' ) class", "# https://webpy.org/docs/0.3/tutorial # https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/ # https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls", "web urls = ( '/', 'index' ) class index: def GET(self): return \"Hello,", ") class index: def GET(self): return \"Hello, world!\" if __name__ == \"__main__\": app", "index: def GET(self): return \"Hello, world!\" if __name__ == \"__main__\": app = web.application(urls,", "def GET(self): return \"Hello, world!\" if __name__ == \"__main__\": app = web.application(urls, globals())", "<reponame>ovvladimir/Servers<gh_stars>0 # https://webpy.org/docs/0.3/tutorial # https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/ # https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web", "# https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/ # python server.py 1234 import web urls = ( '/', 'index'", "( '/', 'index' ) class index: def GET(self): return \"Hello, world!\" if __name__" ]
[ "os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request):", "[100, 8], \"y\": [10, 9]}, ], (0, 1, 10, 713), ), ], )", "] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f:", "bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if", "path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5", "path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2,", "case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case =", "from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2,", "Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\",", "@pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 )", "2, 3]}], (1, 1, 2, 2), ), ( [{\"x\": [0, 1, 2, 3],", "path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke", "from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR", "{\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]}, {\"x\":", "input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0]", "1, 2, 3], \"y\": [1, 2, 3, 4]}, {\"x\": [6, 8, 2, 3],", "from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [", "strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature", "[10, 9]}, ], (0, 1, 10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box):", "import json import os import numpy as np import pytest from py_path_signature.data_models.stroke import", "], (0, 1, 10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes =", "(0, 1, 10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = 
[Stroke(**stroke)", "2, 3], \"y\": [1, 2, 3, 4]}, {\"x\": [6, 8, 2, 3], \"y\":", "import numpy as np import pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import", "4]}, {\"x\": [6, 8], \"y\": [0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]},", "[ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def", "2), ), ( [{\"x\": [0, 1, 2, 3], \"y\": [1, 2, 3, 4]},", "8], \"y\": [10, 9]}, ], (0, 1, 10, 713), ), ], ) def", "strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature = path_signature_extractor.extract_signature(strokes=strokes) assert (path_signature", "\"y\": [0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ], (0, 1, 10,", "py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize(", "TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\": [1, 2,", "open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor():", "4]}, {\"x\": [6, 8, 2, 3], \"y\": [0, 2, 3, 9]}], (0, 0,", "test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes]", "os import numpy as np import pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor", "os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with", "return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases())", "for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases():", "test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR,", "= request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\"))", "def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f)", "expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box", "PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1,", "[1, 2, 3], \"y\": [1, 2, 3]}], (1, 1, 2, 2), ), (", "delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, 
strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes =", "f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor", "strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature = path_signature_extractor.extract_signature(strokes=strokes) assert (path_signature ==", "[714, 1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]}, {\"x\": [100,", "f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature =", "8), ), ( [ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6, 8],", "[0, 2, 3, 9]}], (0, 0, 9, 8), ), ( [ {\"x\": [714,", "9, 8), ), ( [ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6,", "[Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def", "return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for", "9]}], (0, 0, 9, 8), ), ( [ {\"x\": [714, 1], \"y\": [3,", "rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth", "json import os import numpy as np import pytest from py_path_signature.data_models.stroke import Stroke", "if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR,", "f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return", "PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case in", "1, 10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for", "path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor", "), ( [ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\":", "strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes,", "[0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ], (0, 1, 10, 713),", "= strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature = path_signature_extractor.extract_signature(strokes=strokes) assert", "json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\")", "def list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ]", "as np import pytest from py_path_signature.data_models.stroke import Stroke from 
py_path_signature.path_signature_extractor import PathSignatureExtractor from", "expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case))", "3, 4]}, {\"x\": [6, 8, 2, 3], \"y\": [0, 2, 3, 9]}], (0,", "{\"x\": [100, 8], \"y\": [10, 9]}, ], (0, 1, 10, 713), ), ],", "in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return [", "np import pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest", "[1, 2, 3]}], (1, 1, 2, 2), ), ( [{\"x\": [0, 1, 2,", "2, 3], \"y\": [0, 2, 3, 9]}], (0, 0, 9, 8), ), (", ") def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box =", "[6, 8], \"y\": [0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ], (0,", "as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f))", "0, 9, 8), ), ( [ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\":", "2, 3, 9]}], (0, 0, 9, 8), ), ( [ {\"x\": [714, 1],", "test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert", "def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in", "= [Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box", "9]}, ], (0, 1, 10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes", "\"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\": [1, 2, 3]}], (1,", "return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5,", "3, 9]}], (0, 0, 9, 8), ), ( [ {\"x\": [714, 1], \"y\":", "pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR,", "PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature):", ".conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3],", "def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes)", "1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]}, {\"x\": [100, 8],", "for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case", "@pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\": [1, 2, 
3]}],", "{\"x\": [6, 8], \"y\": [0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ],", "[{\"x\": [1, 2, 3], \"y\": [1, 2, 3]}], (1, 1, 2, 2), ),", "bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for", "(1, 1, 2, 2), ), ( [{\"x\": [0, 1, 2, 3], \"y\": [1,", "), ( [{\"x\": [0, 1, 2, 3], \"y\": [1, 2, 3, 4]}, {\"x\":", "os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\"))", "[ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]},", "import pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import", "open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature", "\"y\": [1, 2, 3]}], (1, 1, 2, 2), ), ( [{\"x\": [0, 1,", "3], \"y\": [1, 2, 3]}], (1, 1, 2, 2), ), ( [{\"x\": [0,", "with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f:", "2, 3, 4]}, {\"x\": [6, 8, 2, 3], \"y\": [0, 2, 3, 9]}],", "2, 3], \"y\": [1, 2, 3]}], (1, 1, 2, 2), ), ( [{\"x\":", "min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature", "{\"x\": [6, 8, 2, 3], \"y\": [0, 2, 3, 9]}], (0, 0, 9,", "numpy as np import pytest from py_path_signature.data_models.stroke import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor", "( [{\"x\": [0, 1, 2, 3], \"y\": [1, 2, 3, 4]}, {\"x\": [6,", "strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box ==", "= PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor,", "8], \"y\": [0, 9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ], (0, 1,", "( [{\"x\": [1, 2, 3], \"y\": [1, 2, 3]}], (1, 1, 2, 2),", "as f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor =", "3], \"y\": [0, 2, 3, 9]}], (0, 0, 9, 8), ), ( [", "], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes] bounding_box", "= [Stroke(**stroke) for stroke in input_strokes] path_signature = path_signature_extractor.extract_signature(strokes=strokes) assert (path_signature == path_signature_groundtruth).all()", "assert bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR)", "f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor(", "[6, 8, 2, 3], \"y\": [0, 2, 3, 9]}], (0, 0, 9, 8),", "8, 2, 3], \"y\": 
[0, 2, 3, 9]}], (0, 0, 9, 8), ),", "case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as", "= json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes, path_signature)", "= np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128,", "params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes =", "np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1),", ") return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke)", "order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes,", "import os import numpy as np import pytest from py_path_signature.data_models.stroke import Stroke from", "3]}], (1, 1, 2, 2), ), ( [{\"x\": [0, 1, 2, 3], \"y\":", "import Stroke from py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes,", "expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\": [1, 2, 3]}], (1, 1,", "import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\":", "path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature = path_signature_extractor.extract_signature(strokes=strokes)", "2, 2), ), ( [{\"x\": [0, 1, 2, 3], \"y\": [1, 2, 3,", "path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def", "(0, 0, 9, 8), ), ( [ {\"x\": [714, 1], \"y\": [3, 4]},", "[{\"x\": [0, 1, 2, 3], \"y\": [1, 2, 3, 4]}, {\"x\": [6, 8,", "import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\":", "@pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes", "), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in input_strokes]", "[0, 1, 2, 3], \"y\": [1, 2, 3, 4]}, {\"x\": [6, 8, 2,", "9]}, {\"x\": [100, 8], \"y\": [10, 9]}, ], (0, 1, 10, 713), ),", "py_path_signature.path_signature_extractor import PathSignatureExtractor from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ (", "[3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]}, 
{\"x\": [100, 8], \"y\": [10,", "request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as", "== expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR,", "\"y\": [10, 9]}, ], (0, 1, 10, 713), ), ], ) def test_bounding_box(input_strokes,", "(strokes, path_signature) @pytest.fixture(scope=\"class\") def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30,", "713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke in", "10, 713), ), ], ) def test_bounding_box(input_strokes, expected_bounding_box): strokes = [Stroke(**stroke) for stroke", "with open(os.path.join(TEST_DATA_REFERENCE_DIR, f\"{test_case}.json\")) as f: path_signature = np.array(json.load(f)) return (strokes, path_signature) @pytest.fixture(scope=\"class\") def", "\"y\": [0, 2, 3, 9]}], (0, 0, 9, 8), ), ( [ {\"x\":", "max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes", "list_test_cases(): return [ os.path.splitext(case)[0] for case in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\",", "stroke in input_strokes] bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return", "input_strokes, path_signature_groundtruth = strokes_and_reference_signature strokes = [Stroke(**stroke) for stroke in input_strokes] path_signature =", "3], \"y\": [1, 2, 3, 4]}, {\"x\": [6, 8, 2, 3], \"y\": [0,", "TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR @pytest.mark.parametrize( \"input_strokes, expected_bounding_box\", [ ( [{\"x\": [1, 2, 3], \"y\": [1,", "[1, 2, 3, 4]}, {\"x\": [6, 8, 2, 3], \"y\": [0, 2, 3,", "= PathSignatureExtractor.calculate_bounding_box(strokes=strokes) assert bounding_box == expected_bounding_box def list_test_cases(): return [ os.path.splitext(case)[0] for case", "strokes_and_reference_signature(request): test_case = request.param with open(os.path.join(TEST_DATA_INPUT_DIR, f\"{test_case}.json\")) as f: strokes = json.load(f) with", "def path_signature_extractor(): path_signature_extractor = PathSignatureExtractor( order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return", "1, 2, 2), ), ( [{\"x\": [0, 1, 2, 3], \"y\": [1, 2,", "[ ( [{\"x\": [1, 2, 3], \"y\": [1, 2, 3]}], (1, 1, 2,", "-1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5 ) return path_signature_extractor def test_image_signatures(path_signature_extractor, strokes_and_reference_signature): input_strokes, path_signature_groundtruth =", "( [ {\"x\": [714, 1], \"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0,", "\"y\": [3, 4]}, {\"x\": [6, 8], \"y\": [0, 9]}, {\"x\": [100, 8], \"y\":", "in os.listdir(TEST_DATA_INPUT_DIR) if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case)) ] @pytest.fixture(scope=\"function\", params=list_test_cases()) def strokes_and_reference_signature(request): test_case = request.param", "\"y\": [1, 2, 3, 4]}, {\"x\": [6, 8, 2, 3], \"y\": [0, 
2," ]
[ "\"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog = \"-\" access_log_format = '%(h)s", "loglevel = \"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\"", "\"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s", "120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog = \"-\"", "# pylint: skip-file bind = \"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name", "= \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\"'", "= 4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel =", "pylint: skip-file bind = \"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name =", "accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\"", "4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\"", "= \"-\" loglevel = \"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s", "proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog = \"-\" access_log_format", "skip-file bind = \"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name = \"AMI-Uploader\"", "= \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog = \"-\" access_log_format =", "<reponame>NCKU-CCS/energy-blockchain # pylint: skip-file bind = \"0.0.0.0:4000\" workers = 4 timeout = 120", "\"-\" loglevel = \"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s", "timeout = 120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog", "= \"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog =", "bind = \"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog", "\"0.0.0.0:4000\" workers = 4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog = \"-\"", "errorlog = \"-\" loglevel = \"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s", "= \"info\" accesslog = \"-\" access_log_format = '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s", "workers = 4 timeout = 120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel", "= 120 proc_name = \"AMI-Uploader\" errorlog = \"-\" loglevel = \"info\" accesslog =" ]
[ "\"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestDelBuilding)", "KIND, either express or implied. # See the License for the specific language", "self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command", "Unless required by applicable law or agreed to in writing, software # distributed", "(C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License, Version 2.0 (the", "tu\") command = \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq", "\"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr =", "\")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\"", "\"DSDB does not have building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self,", "does not have building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\",", "= \"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del", "\"del building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have", "nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017", "delete building nettest, \" \"networks were found using this location.\", command) self.dsdb_verify(empty=True) def", "nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete building nettest,", "this file except in compliance with the License. # You may obtain a", "ANY KIND, either express or implied. # See the License for the specific", "building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have building", "command = \"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ == '__main__': suite", "= self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def", "\"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command = \"show building --building bu\"", "dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True,", "command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del", "\"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building --building", "the specific language governing permissions and # limitations under the License. \"\"\"Module for", "OF ANY KIND, either express or implied. # See the License for the", "command = \"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command =", "building command.\"\"\" import unittest if __name__ == \"__main__\": import utils utils.import_depends() from brokertest", "= self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete building nettest, \" \"networks", "bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building", "utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq", "if __name__ == \"__main__\": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand):", "--building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command", "test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ == '__main__':", "utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building", "command = \"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show", "test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building tu\" self.noouttest(command.split(\"", "self.matchoutput(err, \"Bad Request: Could not delete building nettest, \" \"networks were found using", "self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building --building nettest\" self.noouttest(command.split(\" \"))", "unittest if __name__ == \"__main__\": import utils utils.import_depends() 
from brokertest import TestBrokerCommand class", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright", "from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\")", "\")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\",", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "test_121_del_nettest_fail(self): # try delete building command = \"del building --building nettest\" err =", "this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq", "= \"delete_building_aq -building bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr)", "not delete building nettest, \" \"networks were found using this location.\", command) self.dsdb_verify(empty=True)", "found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\",", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "command.\"\"\" import unittest if __name__ == \"__main__\": import utils utils.import_depends() from brokertest import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building", "\"del building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "-city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\",", "#!/usr/bin/env python # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab", "command) def test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self):", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command", "bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command =", "err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete building nettest, \"", "self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del 
building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def", "import unittest if __name__ == \"__main__\": import utils utils.import_depends() from brokertest import TestBrokerCommand", "self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete building nettest, \" \"networks were", "or agreed to in writing, software # distributed under the License is distributed", "nettest\") command = \"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command", "test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building cards\" self.noouttest(command.split(\"", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "# ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor #", "\"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building", "err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have building bz defined, proceeding.\",", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building bu\" self.noouttest(command.split(\" \"))", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building cards\"", "License. # You may obtain a copy of the License at # #", "\"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\"", "\")) self.matchoutput(err, \"DSDB does not have building bz defined, proceeding.\", command) self.dsdb_verify() def", "\"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building --building", "compliance with the License. # You may obtain a copy of the License", "python # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4", "= \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not", "shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"]", "set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed", "under the License. 
\"\"\"Module for testing the del building command.\"\"\" import unittest if", "\"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city", "errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command =", "\" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\",", "\")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command = \"show building", "try delete building command = \"del building --building nettest\" err = self.badrequesttest(command.split(\" \"))", "--building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \"", "self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def", "\"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify()", "not use this file except in compliance with the License. # You may", "test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try", "self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have building bz defined, proceeding.\", command) self.dsdb_verify()", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have building bz defined, proceeding.\", command)", "# # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building", "# you may not use this file except in compliance with the License.", "= \"del building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could", "agreed to in writing, software # distributed under the License is distributed on", "== \"__main__\": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self):", "= \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del", "testing the del building command.\"\"\" import unittest if __name__ == \"__main__\": import utils", "command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command", "(the \"License\"); # you may not use this file except in compliance with", "\"\"\"Module for testing the del building command.\"\"\" import unittest if __name__ == \"__main__\":", "# Unless required by applicable law or agreed to in writing, software #", "bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does 
not have building bz defined,", "\"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "-*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor", "\"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\",", "-building tu\") command = \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self):", "bldg-not-exist not found.\", command) def test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\"", "bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\" \"))", "License. \"\"\"Module for testing the del building command.\"\"\" import unittest if __name__ ==", "out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command", "file except in compliance with the License. # You may obtain a copy", "--building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\"", "-building cards\") command = \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self):", "command = \"del building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request:", "self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \"))", "__name__ == \"__main__\": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def", "\"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\",", "License for the specific language governing permissions and # limitations under the License.", "self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del", "TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del", "# try delete building command = \"del building --building nettest\" err = self.badrequesttest(command.split(\"", "language governing permissions and # limitations under the License. 
\"\"\"Module for testing the", "tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr", "to in writing, software # distributed under the License is distributed on an", "True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\" err = self.statustest(command.split(\"", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "for the specific language governing permissions and # limitations under the License. \"\"\"Module", "were found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self):", "self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg bz doesn't exists\"", "test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building bu\" self.noouttest(command.split(\"", "def test_121_del_nettest_fail(self): # try delete building command = \"del building --building nettest\" err", "or implied. # See the License for the specific language governing permissions and", "-building bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\")", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command =", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\")", "command = \"del building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does", "building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestDelBuilding) unittest.TextTestRunner(verbosity=2).run(suite)", "building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\",", "--building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command", "self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command)", "def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command =", "limitations under the License. \"\"\"Module for testing the del building command.\"\"\" import unittest", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\" \")) if", "\")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building", "\"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building", "# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License, Version", "Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License, Version 2.0", "softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the", "= \"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ == '__main__': suite =", "\"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify()", "doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\"", "= \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") 
self.dsdb_expect(\"delete_building_aq", "self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\",", "you may not use this file except in compliance with the License. #", "24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building command", "\"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr", "building command = \"del building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad", "command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\")", "def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building --building nettest\"", "bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command", "for testing the del building command.\"\"\" import unittest if __name__ == \"__main__\": import", "cards\") command = \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\",", "\"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building command =", "bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self):", "use this file except in compliance with the License. # You may obtain", "specific language governing permissions and # limitations under the License. 
\"\"\"Module for testing", "defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up", "--building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not have building bz", "test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command =", "self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete", "bu\") command = \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\",", "test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command =", "bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\",", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "building nettest, \" \"networks were found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self):", "not found.\", command) def test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\" \"))", "\"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building", "= \"del building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB does not", "2.0 (the \"License\"); # you may not use this file except in compliance", "self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\")", "ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\",", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building --building nettest\" self.noouttest(command.split(\"", "\"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building command = \"del building", "Contributor # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command = \"show", "command = \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\")", "test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del", "# # Unless required by applicable law or agreed to in writing, software", "building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete", "\"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify()", "express or implied. # See the License for the specific language governing permissions", "\"Bad Request: Could not delete building nettest, \" \"networks were found using this", "def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building --building bu\"", "\"delete_building_aq -building bz\" errstr = \"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\",", "either express or implied. # See the License for the specific language governing", "def test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__ ==", "\"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify()", "building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building --building tu\"", "bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\", \"--building\",", "ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # #", "indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C)", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "Could not delete building nettest, \" \"networks were found using this location.\", command)", "the License. # You may obtain a copy of the License at #", "self.matchoutput(err, \"DSDB does not have building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self):", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "permissions and # limitations under the License. 
\"\"\"Module for testing the del building", "# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4:", "-building bu\") command = \"del building --building bu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_ex(self):", "[\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq", "\"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg bz", "= [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command =", "\")) def test_300_verify_tu(self): command = \"show building --building tu\" self.notfoundtest(command.split(\" \")) if __name__", "self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\" err =", "cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # #", "not have building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24,", "\" \"networks were found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\")", "with the License. # You may obtain a copy of the License at", "command = \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\")", "using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\")", "self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building", "\")) self.dsdb_verify() def test_100_del_ex(self): self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building", "command = \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name", "have building bz defined, proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\",", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "= \"del building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq", "\"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building", "= \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz", "= \"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command = \"show building", "law or agreed to in writing, software # distributed under 
the License is", "the License for the specific language governing permissions and # limitations under the", "2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under the Apache License, Version 2.0 (the \"License\");", "def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building tu\"", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "network\") def test_121_del_nettest_fail(self): # try delete building command = \"del building --building nettest\"", "building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building", "-*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: #", "def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command", "import utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\")", "import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command =", "\"__main__\": import utils utils.import_depends() from brokertest import TestBrokerCommand class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\",", "self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def test_300_verify_bu(self): command = \"show building --building", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the del building command.\"\"\" import unittest if __name__ == \"__main__\": import utils utils.import_depends()", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"bz\") command = \"del building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err, \"DSDB", "command = \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist", "See the License for the specific language governing permissions and # limitations under", "\"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building command = \"del", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "\")) self.matchoutput(err, \"Bad Request: Could not delete building nettest, \" \"networks were found", "self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "delete building command = \"del building --building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err,", "nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out", "self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def", "self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex \" \"-building_addr Nowhere\")", "self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\" err = self.statustest(command.split(\" \")) self.matchoutput(err,", "class TestDelBuilding(TestBrokerCommand): def test_100_del_bu(self): self.dsdb_expect_del_campus_building(\"ny\", \"bu\") self.dsdb_expect(\"delete_building_aq -building bu\") command = \"del building", "self.dsdb_expect_del_campus_building(\"ta\", \"cards\") self.dsdb_expect(\"delete_building_aq -building cards\") command = \"del building --building cards\" self.noouttest(command.split(\" \"))", "governing permissions and # limitations under the License. 
\"\"\"Module for testing the del", "--building nettest\" err = self.badrequesttest(command.split(\" \")) self.matchoutput(err, \"Bad Request: Could not delete building", "location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building", "--building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command) def", "self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\") command = \"del building --building tu\" self.noouttest(command.split(\" \"))", "proceeding.\", command) self.dsdb_verify() def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\")", "def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out,", "building --building tu\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_110_del_bunotindsdb(self): self.dsdb_expect(\"add_building_aq -building_name bz -city ex", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "\"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg", "-building nettest\") command = \"del building --building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self):", "and # limitations under the License. \"\"\"Module for testing the del building command.\"\"\"", "the License. \"\"\"Module for testing the del building command.\"\"\" import unittest if __name__", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "del building command.\"\"\" import unittest if __name__ == \"__main__\": import utils utils.import_depends() from", "nettest, \" \"networks were found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self,", "found.\", command) def test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\" \")) def", "def test_300_verify_bu(self): command = \"show building --building bu\" self.notfoundtest(command.split(\" \")) def test_300_verify_tu(self): command", "\"networks were found using this location.\", command) self.dsdb_verify(empty=True) def test_122_cleanup_nettest_net(self): self.net.dispose_network(self, \"nettest_net\") def", "comments=\"Made-up network\") def test_121_del_nettest_fail(self): # try delete building command = \"del building --building", "test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building", "self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\" out =", "expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor # # Licensed under", "building --building bldg-not-exist\" out = self.notfoundtest(command.split(\" \")) self.matchoutput(out, \"Building bldg-not-exist not found.\", command)", "\"bldg bz doesn't exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building", "\"bz\") command = [\"add\", \"building\", \"--building\", \"bz\", \"--city\", \"ex\", \"--address\", \"Nowhere\"] self.noouttest(command) self.dsdb_verify()", "exists\" self.dsdb_expect(dsdb_command, True, errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\" err", "--building nettest\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_200_del_building_notexist(self): command = \"del building --building bldg-not-exist\"", "\"Nowhere\"] self.noouttest(command) self.dsdb_verify() dsdb_command = \"delete_building_aq -building bz\" errstr = \"bldg bz doesn't", "-building_name bz -city ex \" \"-building_addr Nowhere\") self.dsdb_expect_add_campus_building(\"ta\", \"bz\") command = [\"add\", \"building\",", "errstr) self.dsdb_expect_del_campus_building(\"ta\", \"bz\") command = \"del building --building bz\" err = self.statustest(command.split(\" \"))", "Request: Could not delete building nettest, \" \"networks were found using this location.\",", "# limitations under the License. 
\"\"\"Module for testing the del building command.\"\"\" import", "self.net.dispose_network(self, \"nettest_net\") def test_130_del_nettest(self): self.dsdb_expect_del_campus_building(\"ny\", \"nettest\") self.dsdb_expect(\"delete_building_aq -building nettest\") command = \"del building", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "building --building cards\" self.noouttest(command.split(\" \")) self.dsdb_verify() def test_100_del_tu(self): self.dsdb_expect_del_campus_building(\"ln\", \"tu\") self.dsdb_expect(\"delete_building_aq -building tu\")", "def test_120_add_nettest_net(self): self.net.allocate_network(self, \"nettest_net\", 24, \"unknown\", \"building\", \"nettest\", comments=\"Made-up network\") def test_121_del_nettest_fail(self): #" ]
[
<reponame>TormodLandet/Ocellaris
# Copyright (C) 2015-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0

import dolfin
from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace
from dolfin import FacetNormal, TrialFunction, TestFunction, TestFunctions, Function
from dolfin import dot, as_vector, dx, dS, ds, LocalSolver


class VelocityBDMProjection:
    def __init__(
        self,
        simulation,
        w,
        incompressibility_flux_type='central',
        D12=None,
        degree=None,
        use_bcs=True,
        use_nedelec=True,
    ):
        """
        Implement equation 4a and 4b in "Two new techniques for generating
        exactly incompressible approximate velocities" by <NAME> (2009)

        For each element K in the mesh:

            <u⋅n, φ> = <û⋅n, φ>   ∀ ϕ ∈ P_{k}(F) for any face F ∈ ∂K
            (u, ϕ) = (w, ϕ)       ∀ φ ∈ P_{k-2}(K)^2
            (u, ϕ) = (w, ϕ)       ∀ φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ = 0 in K, ϕ⋅n = 0 on ∂K}

        Here w is the input velocity function in DG2 space and û is the flux
        at each face. P_{x} is the space of polynomials of order k

        The flux type can be 'central' or 'upwind'
        """
        self.simulation = simulation
        simulation.log.info('    Setting up velocity BDM projection')

        V = w[0].function_space()
        ue = V.ufl_element()
        gdim = w.ufl_shape[0]
        if degree is None:
            pdeg = ue.degree()
            Vout = V
        else:
            pdeg = degree
            Vout = FunctionSpace(V.mesh(), 'DG', degree)
        pg = (pdeg, gdim)

        assert ue.family() == 'Discontinuous Lagrange'
        assert incompressibility_flux_type in ('central', 'upwind')

        if use_nedelec and pdeg > 1:
            a, L, V = self._setup_projection_nedelec(
                w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim
            )
        elif gdim == 2 and pdeg == 1:
            a, L, V = self._setup_dg1_projection_2D(w, incompressibility_flux_type, D12, use_bcs)
        elif gdim == 2 and pdeg == 2:
            a, L, V = self._setup_dg2_projection_2D(w, incompressibility_flux_type, D12, use_bcs)
        else:
            raise NotImplementedError(
                'VelocityBDMProjection does not support '
                'degree %d and dimension %d' % pg
            )

        # Pre-factorize matrices and store for usage in projection
        self.local_solver = LocalSolver(a, L)
        self.local_solver.factorize()
        self.temp_function = Function(V)
        self.w = w

        # Create function assigners
        self.assigners = []
        for i in range(gdim):
            self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i)))

    def _setup_dg1_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs):
        """
        Implement the projection where the result is BDM embeded in a DG1 function
        """
        sim = self.simulation
        k = 1
        gdim = 2
        mesh = w[0].function_space().mesh()
        V = VectorFunctionSpace(mesh, 'DG', k)
        W = FunctionSpace(mesh, 'DGT', k)
        n = FacetNormal(mesh)
        v1 = TestFunction(W)
        u = TrialFunction(V)

        # The same fluxes that are used in the incompressibility equation
        if incompressibility_flux_type == 'central':
            u_hat_dS = dolfin.avg(w)
        elif incompressibility_flux_type == 'upwind':
            w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
            switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
            u_hat_dS = switch * w('+') + (1 - switch) * w('-')

        if D12 is not None:
            u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n)

        # Equation 1 - flux through the sides
        a = L = 0
        for R in '+-':
            a += dot(u(R), n(R)) * v1(R) * dS
            L += dot(u_hat_dS, n(R)) * v1(R) * dS

        # Eq. 1 cont. - flux through external boundaries
        a += dot(u, n) * v1 * ds
        if use_bcs:
            for d in range(gdim):
                dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d]
                neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
                robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
                outlet_bcs = sim.data['outlet_bcs']

                for dbc in dirichlet_bcs:
                    u_bc = dbc.func()
                    L += u_bc * n[d] * v1 * dbc.ds()

                for nbc in neumann_bcs + robin_bcs + outlet_bcs:
                    if nbc.enforce_zero_flux:
                        pass  # L += 0
                    else:
                        L += w[d] * n[d] * v1 * nbc.ds()

                for sbc in sim.data['slip_bcs'].get('u', []):
                    pass  # L += 0
        else:
            L += dot(w, n) * v1 * ds

        # Equation 2 - internal shape : empty for DG1
        # Equation 3 - BDM Phi : empty for DG1

        return a, L, V

    def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs):
        """
        Implement the projection where the result is BDM embeded in a DG2 function
        """
        sim = self.simulation
        k = 2
        gdim = 2
        mesh = w[0].function_space().mesh()
        V = VectorFunctionSpace(mesh, 'DG', k)
        n = FacetNormal(mesh)

        # The mixed function space of the projection test functions
        e1 = FiniteElement('DGT', mesh.ufl_cell(), k)
        e2 = VectorElement('DG', mesh.ufl_cell(), k - 2)
        e3 = FiniteElement('Bubble', mesh.ufl_cell(), 3)
        em = MixedElement([e1, e2, e3])
        W = FunctionSpace(mesh, em)
        v1, v2, v3b = TestFunctions(W)
        u = TrialFunction(V)

        # The same fluxes that are used in the incompressibility equation
        if incompressibility_flux_type == 'central':
            u_hat_dS = dolfin.avg(w)
        elif incompressibility_flux_type == 'upwind':
            w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
            switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
            u_hat_dS = switch * w('+') + (1 - switch) * w('-')

        if D12 is not None:
            u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n)

        # Equation 1 - flux through the sides
        a = L = 0
        for R in '+-':
            a += dot(u(R), n(R)) * v1(R) * dS
            L += dot(u_hat_dS, n(R)) * v1(R) * dS

        # Eq. 1 cont. - flux through external boundaries
        a += dot(u, n) * v1 * ds
        if use_bcs:
            for d in range(gdim):
                dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d, [])
                neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
                robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
                outlet_bcs = sim.data['outlet_bcs']

                for dbc in dirichlet_bcs:
                    u_bc = dbc.func()
                    L += u_bc * n[d] * v1 * dbc.ds()

                for nbc in neumann_bcs + robin_bcs + outlet_bcs:
                    if nbc.enforce_zero_flux:
                        pass  # L += 0
                    else:
                        L += w[d] * n[d] * v1 * nbc.ds()

                for sbc in sim.data['slip_bcs'].get('u', []):
                    pass  # L += 0
        else:
            L += dot(w, n) * v1 * ds

        # Equation 2 - internal shape
        a += dot(u, v2) * dx
        L += dot(w, v2) * dx

        # Equation 3 - BDM Phi
        v3 = as_vector([v3b.dx(1), -v3b.dx(0)])  # Curl of [0, 0, v3b]
        a += dot(u, v3) * dx
        L += dot(w, v3) * dx

        return a, L, V

    def _setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim):
        """
        Implement the BDM-like projection using Nedelec elements in the test function
        """
        sim = self.simulation
        k = pdeg
        mesh = w[0].function_space().mesh()
        V = VectorFunctionSpace(mesh, 'DG', k)
        n = FacetNormal(mesh)

        # The mixed function space of the projection test functions
        e1 = FiniteElement('DGT', mesh.ufl_cell(), k)
        e2 = FiniteElement('N1curl', mesh.ufl_cell(), k - 1)
        em = MixedElement([e1, e2])
        W = FunctionSpace(mesh, em)
        v1, v2 = TestFunctions(W)
        u = TrialFunction(V)

        # The same fluxes that are used in the incompressibility equation
        if incompressibility_flux_type == 'central':
            u_hat_dS = dolfin.avg(w)
        elif incompressibility_flux_type == 'upwind':
            w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
            switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
            u_hat_dS = switch * w('+') + (1 - switch) * w('-')

        if D12 is not None:
            u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n)

        # Equation 1 - flux through the sides
        a = L = 0
        for R in '+-':
            a += dot(u(R), n(R)) * v1(R) * dS
            L += dot(u_hat_dS, n(R)) * v1(R) * dS

        # Eq. 1 cont. - flux through external boundaries
        a += dot(u, n) * v1 * ds
        if use_bcs:
            for d in range(gdim):
                dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d, [])
                neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
                robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
                outlet_bcs = sim.data['outlet_bcs']

                for dbc in dirichlet_bcs:
                    u_bc = dbc.func()
                    L += u_bc * n[d] * v1 * dbc.ds()

                for nbc in neumann_bcs + robin_bcs + outlet_bcs:
                    if nbc.enforce_zero_flux:
                        pass  # L += 0
                    else:
                        L += w[d] * n[d] * v1 * nbc.ds()

                for sbc in sim.data['slip_bcs'].get('u', []):
                    pass  # L += 0
        else:
            L += dot(w, n) * v1 * ds

        # Equation 2 - internal shape using 'Nedelec 1st kind H(curl)' elements
        a += dot(u, v2) * dx
        L += dot(w, v2) * dx

        return a, L, V

    def run(self, w=None):
        """
        Perform the projection based on the current state of the Function w
        """
        # Find the projected velocity
        self.local_solver.solve_local_rhs(self.temp_function)

        # Assign to w
        w = self.w if w is None else w
        U = self.temp_function.split()
        for i, a in enumerate(self.assigners):
            a.assign(w[i], U[i])
- flux through", "use_bcs): \"\"\" Implement the projection where the result is BDM embeded in a", "L += dot(w, v2) * dx return a, L, V def run(self, w=None):", "robin_bcs + outlet_bcs: if nbc.enforce_zero_flux: pass # L += 0 else: L +=", "ds, LocalSolver class VelocityBDMProjection: def __init__( self, simulation, w, incompressibility_flux_type='central', D12=None, degree=None, use_bcs=True,", "∈ P_{k}(F) for any face F ∈ ∂K (u, ϕ) = (w, ϕ)", "= 1 gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k)", "v1 * ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' %", "e3]) W = FunctionSpace(mesh, em) v1, v2, v3b = TestFunctions(W) u = TrialFunction(V)", "0.0) u_hat_dS = switch * w('+') + (1 - switch) * w('-') if", "∇⋅ϕ = 0 in K, ϕ⋅n = 0 on ∂K} Here w is", "empty for DG1 return a, L, V def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs):", "\"\"\" sim = self.simulation k = 2 gdim = 2 mesh = w[0].function_space().mesh()", "internal shape a += dot(u, v2) * dx L += dot(w, v2) *", "V = self._setup_dg1_projection_2D(w, incompressibility_flux_type, D12, use_bcs) elif gdim == 2 and pdeg ==", "+ outlet_bcs: if nbc.enforce_zero_flux: pass # L += 0 else: L += w[d]", "sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d' %", "(1 - switch) * w('-') if D12 is not None: u_hat_dS += dolfin.Constant([D12,", "n) # Equation 1 - flux through the sides a = L =", "self.simulation k = 2 gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh,", "elements a += dot(u, v2) * dx L += dot(w, v2) * dx", "use_bcs=True, use_nedelec=True, ): \"\"\" Implement equation 4a and 4b in \"Two new techniques", "self._setup_dg1_projection_2D(w, incompressibility_flux_type, D12, use_bcs) elif gdim == 2 and pdeg == 2: a,", "use_bcs) else: raise NotImplementedError( 'VelocityBDMProjection does not support ' 'degree %d and dimension", "of the projection test functions e1 = FiniteElement('DGT', mesh.ufl_cell(), k) e2 = VectorElement('DG',", "d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d,", "nbc.enforce_zero_flux: pass # L += 0 else: L += w[d] * n[d] *", "incompressible approximate velocities\" by <NAME> (2009) For each element K in the mesh:", "[0, 0, v3b] a += dot(u, v3) * dx L += dot(w, v3)", "velocities\" by <NAME> (2009) For each element K in the mesh: <u⋅n, φ>", "import dolfin from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace from dolfin import", "4a and 4b in \"Two new techniques for generating exactly incompressible approximate velocities\"", "n = FacetNormal(mesh) # The mixed function space of the projection test functions", "= w[0].function_space() ue = V.ufl_element() gdim = w.ufl_shape[0] if degree is None: pdeg", "w is None else w U = self.temp_function.split() for i, a in enumerate(self.assigners):", "'Discontinuous Lagrange' assert incompressibility_flux_type in ('central', 'upwind') if use_nedelec and pdeg > 1:", "Function(V) self.w = w # Create function assigners self.assigners = [] for i", "* v1 * ds # Equation 2 - internal shape : empty for", "\"\"\" Implement the BDM-like projection using Nedelec elements in the test function \"\"\"", "= FunctionSpace(mesh, em) v1, v2, v3b = TestFunctions(W) u = TrialFunction(V) # The", "the flux at each face. 
P_{x} is the space of polynomials of order", "VelocityBDMProjection: def __init__( self, simulation, w, incompressibility_flux_type='central', D12=None, degree=None, use_bcs=True, use_nedelec=True, ): \"\"\"", "- switch) * w('-') if D12 is not None: u_hat_dS += dolfin.Constant([D12, D12])", "k = 2 gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG',", "R in '+-': a += dot(u(R), n(R)) * v1(R) * dS L +=", "FiniteElement('DGT', mesh.ufl_cell(), k) e2 = VectorElement('DG', mesh.ufl_cell(), k - 2) e3 = FiniteElement('Bubble',", "simulation simulation.log.info(' Setting up velocity BDM projection') V = w[0].function_space() ue = V.ufl_element()", "_setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim): \"\"\" Implement the BDM-like projection using", "w is the input velocity function in DG2 space and û is the", "space and û is the flux at each face. P_{x} is the space", "boundaries a += dot(u, n) * v1 * ds if use_bcs: for d", "= FacetNormal(mesh) v1 = TestFunction(W) u = TrialFunction(V) # The same fluxes that", "'+-': a += dot(u(R), n(R)) * v1(R) * dS L += dot(u_hat_dS, n(R))", "= TestFunctions(W) u = TrialFunction(V) # The same fluxes that are used in", "from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace from dolfin import FacetNormal, TrialFunction,", "def __init__( self, simulation, w, incompressibility_flux_type='central', D12=None, degree=None, use_bcs=True, use_nedelec=True, ): \"\"\" Implement", "each element K in the mesh: <u⋅n, φ> = <û⋅n, φ> ∀ ϕ", "Equation 3 - BDM Phi v3 = as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of [0,", "L += dot(w, v3) * dx return a, L, V def _setup_projection_nedelec(self, w,", "= (pdeg, gdim) assert ue.family() == 'Discontinuous Lagrange' assert incompressibility_flux_type in ('central', 'upwind')", "L, V = self._setup_dg2_projection_2D(w, incompressibility_flux_type, D12, use_bcs) else: raise NotImplementedError( 'VelocityBDMProjection does not", "# Pre-factorize matrices and store for usage in projection self.local_solver = LocalSolver(a, L)", "abs(dot(w, n))) / 2.0 switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0) u_hat_dS = switch", "= w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n = FacetNormal(mesh) # The mixed", "* n[d] * v1 * nbc.ds() for sbc in sim.data['slip_bcs'].get('u', []): pass #", "P_{k}(F) for any face F ∈ ∂K (u, ϕ) = (w, ϕ) ∀", "MixedElement([e1, e2, e3]) W = FunctionSpace(mesh, em) v1, v2, v3b = TestFunctions(W) u", "gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n =", "does not support ' 'degree %d and dimension %d' % pg ) #", "* v1(R) * dS L += dot(u_hat_dS, n(R)) * v1(R) * dS #", "BDM-like projection using Nedelec elements in the test function \"\"\" sim = self.simulation", "in the test function \"\"\" sim = self.simulation k = pdeg mesh =", "v3) * dx return a, L, V def _setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs,", "BDM embeded in a DG1 function \"\"\" sim = self.simulation k = 1", "% d, []) neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d' %", "ϕ⋅n = 0 on ∂K} Here w is the input velocity function in", "k) e2 = FiniteElement('N1curl', mesh.ufl_cell(), k - 1) em = MixedElement([e1, e2]) W", "the mesh: <u⋅n, φ> = <û⋅n, φ> ∀ ϕ ∈ P_{k}(F) for any", "FacetNormal(mesh) v1 = TestFunction(W) u = TrialFunction(V) # The same fluxes that are", "sbc in sim.data['slip_bcs'].get('u', []): 
pass # L += 0 else: L += dot(w,", "2015-2019 <NAME> # SPDX-License-Identifier: Apache-2.0 import dolfin from dolfin import FiniteElement, VectorElement, MixedElement,", "= FiniteElement('DGT', mesh.ufl_cell(), k) e2 = FiniteElement('N1curl', mesh.ufl_cell(), k - 1) em =", "dot(w, n) * v1 * ds # Equation 2 - internal shape :", "of polynomials of order k The flux type can be 'central' or 'upwind'", "FunctionSpace(V.mesh(), 'DG', degree) pg = (pdeg, gdim) assert ue.family() == 'Discontinuous Lagrange' assert", "Implement equation 4a and 4b in \"Two new techniques for generating exactly incompressible", "= sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d'", "i in range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def _setup_dg1_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs): \"\"\" Implement", "= (w, ϕ) ∀ φ ∈ P_{k-2}(K)^2 (u, ϕ) = (w, ϕ) ∀", "* v1 * ds # Equation 2 - internal shape using 'Nedelec 1st", "+ abs(dot(w, n))) / 2.0 switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0) u_hat_dS =", "FunctionSpace(mesh, em) v1, v2 = TestFunctions(W) u = TrialFunction(V) # The same fluxes", "# Equation 2 - internal shape using 'Nedelec 1st kind H(curl)' elements a", "projection') V = w[0].function_space() ue = V.ufl_element() gdim = w.ufl_shape[0] if degree is", "return a, L, V def run(self, w=None): \"\"\" Perform the projection based on", "pdeg = degree Vout = FunctionSpace(V.mesh(), 'DG', degree) pg = (pdeg, gdim) assert", "V = w[0].function_space() ue = V.ufl_element() gdim = w.ufl_shape[0] if degree is None:", "V.ufl_element() gdim = w.ufl_shape[0] if degree is None: pdeg = ue.degree() Vout =", "e2, e3]) W = FunctionSpace(mesh, em) v1, v2, v3b = TestFunctions(W) u =", "function \"\"\" sim = self.simulation k = 2 gdim = 2 mesh =", "# The mixed function space of the projection test functions e1 = FiniteElement('DGT',", "D12, use_bcs) else: raise NotImplementedError( 'VelocityBDMProjection does not support ' 'degree %d and", "-v3b.dx(0)]) # Curl of [0, 0, v3b] a += dot(u, v3) * dx", "elements in the test function \"\"\" sim = self.simulation k = pdeg mesh", "kind H(curl)' elements a += dot(u, v2) * dx L += dot(w, v2)", "D12, use_bcs): \"\"\" Implement the projection where the result is BDM embeded in", "import FacetNormal, TrialFunction, TestFunction, TestFunctions, Function from dolfin import dot, as_vector, dx, dS,", "Lagrange' assert incompressibility_flux_type in ('central', 'upwind') if use_nedelec and pdeg > 1: a,", "# Equation 3 - BDM Phi v3 = as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of", "Function w \"\"\" # Find the projected velocity self.local_solver.solve_local_rhs(self.temp_function) # Assign to w", "ϕ ∈ P_{k}(F) for any face F ∈ ∂K (u, ϕ) = (w,", "v1, v2, v3b = TestFunctions(W) u = TrialFunction(V) # The same fluxes that", "Create function assigners self.assigners = [] for i in range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def", "For each element K in the mesh: <u⋅n, φ> = <û⋅n, φ> ∀", "* ds # Equation 2 - internal shape using 'Nedelec 1st kind H(curl)'", "pass # L += 0 else: L += w[d] * n[d] * v1", "dx L += dot(w, v2) * dx # Equation 3 - BDM Phi", "v2) * dx L += dot(w, v2) * dx # Equation 3 -", "Assign to w w = self.w if w is None else w U", "# Eq. 1 cont. 
- flux through external boundaries a += dot(u, n)", "outlet_bcs = sim.data['outlet_bcs'] for dbc in dirichlet_bcs: u_bc = dbc.func() L += u_bc", "(w, ϕ) ∀ φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ = 0 in", "the projection where the result is BDM embeded in a DG1 function \"\"\"", "if nbc.enforce_zero_flux: pass # L += 0 else: L += w[d] * n[d]", "= FiniteElement('Bubble', mesh.ufl_cell(), 3) em = MixedElement([e1, e2, e3]) W = FunctionSpace(mesh, em)", "mesh.ufl_cell(), k - 1) em = MixedElement([e1, e2]) W = FunctionSpace(mesh, em) v1,", ": empty for DG1 return a, L, V def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12,", "if use_nedelec and pdeg > 1: a, L, V = self._setup_projection_nedelec( w, incompressibility_flux_type,", "the current state of the Function w \"\"\" # Find the projected velocity", "for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' %", "* w('-') if D12 is not None: u_hat_dS += dolfin.Constant([D12] * gdim) *", "v3) * dx L += dot(w, v3) * dx return a, L, V", "if D12 is not None: u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n)", "= (dot(w, n) + abs(dot(w, n))) / 2.0 switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0,", "K in the mesh: <u⋅n, φ> = <û⋅n, φ> ∀ ϕ ∈ P_{k}(F)", "u_bc = dbc.func() L += u_bc * n[d] * v1 * dbc.ds() for", "w=None): \"\"\" Perform the projection based on the current state of the Function", "MixedElement([e1, e2]) W = FunctionSpace(mesh, em) v1, v2 = TestFunctions(W) u = TrialFunction(V)", "u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n) # Equation 1 - flux", "w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) W = FunctionSpace(mesh, 'DGT', k) n =", "Function from dolfin import dot, as_vector, dx, dS, ds, LocalSolver class VelocityBDMProjection: def", "w, incompressibility_flux_type, D12, use_bcs): \"\"\" Implement the projection where the result is BDM", "Perform the projection based on the current state of the Function w \"\"\"", "a, L, V def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs): \"\"\" Implement the projection", "= VectorFunctionSpace(mesh, 'DG', k) n = FacetNormal(mesh) # The mixed function space of", "dx L += dot(w, v3) * dx return a, L, V def _setup_projection_nedelec(self,", "range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def _setup_dg1_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs): \"\"\" Implement the projection", "w.ufl_shape[0] if degree is None: pdeg = ue.degree() Vout = V else: pdeg", "in a DG1 function \"\"\" sim = self.simulation k = 1 gdim =", "+= u_bc * n[d] * v1 * dbc.ds() for nbc in neumann_bcs +", "* dS # Eq. 1 cont. 
- flux through external boundaries a +=", "Phi v3 = as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of [0, 0, v3b] a +=", "The same fluxes that are used in the incompressibility equation if incompressibility_flux_type ==", "u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n) # Equation 1 - flux through", "v1 * ds # Equation 2 - internal shape using 'Nedelec 1st kind", "element K in the mesh: <u⋅n, φ> = <û⋅n, φ> ∀ ϕ ∈", "%d' % pg ) # Pre-factorize matrices and store for usage in projection", "FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace from dolfin import FacetNormal, TrialFunction, TestFunction, TestFunctions, Function", "the test function \"\"\" sim = self.simulation k = pdeg mesh = w[0].function_space().mesh()", "dolfin.jump(w, n) # Equation 1 - flux through the sides a = L", "Here w is the input velocity function in DG2 space and û is", "* ds # Equation 2 - internal shape : empty for DG1 #", "= FacetNormal(mesh) # The mixed function space of the projection test functions e1", "∈ ∂K (u, ϕ) = (w, ϕ) ∀ φ ∈ P_{k-2}(K)^2 (u, ϕ)", "Apache-2.0 import dolfin from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace from dolfin", "BDM embeded in a DG2 function \"\"\" sim = self.simulation k = 2", "w('+') + (1 - switch) * w('-') if D12 is not None: u_hat_dS", "incompressibility_flux_type, D12, use_bcs) else: raise NotImplementedError( 'VelocityBDMProjection does not support ' 'degree %d", "in DG2 space and û is the flux at each face. P_{x} is", "= TrialFunction(V) # The same fluxes that are used in the incompressibility equation", "φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ = 0 in K, ϕ⋅n =", "usage in projection self.local_solver = LocalSolver(a, L) self.local_solver.factorize() self.temp_function = Function(V) self.w =", "is BDM embeded in a DG2 function \"\"\" sim = self.simulation k =", "2 gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n", "ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d, [])", "v2) * dx return a, L, V def run(self, w=None): \"\"\" Perform the", "of the projection test functions e1 = FiniteElement('DGT', mesh.ufl_cell(), k) e2 = FiniteElement('N1curl',", "gdim): \"\"\" Implement the BDM-like projection using Nedelec elements in the test function", "* dx return a, L, V def _setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs, pdeg,", "the sides a = L = 0 for R in '+-': a +=", "φ> = <û⋅n, φ> ∀ ϕ ∈ P_{k}(F) for any face F ∈", "embeded in a DG2 function \"\"\" sim = self.simulation k = 2 gdim", "dolfin.Constant([D12] * gdim) * dolfin.jump(w, n) # Equation 1 - flux through the", "DG1 # Equation 3 - BDM Phi : empty for DG1 return a,", "a, L, V def _setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim): \"\"\" Implement", "= TestFunction(W) u = TrialFunction(V) # The same fluxes that are used in", "em = MixedElement([e1, e2]) W = FunctionSpace(mesh, em) v1, v2 = TestFunctions(W) u", "space of the projection test functions e1 = FiniteElement('DGT', mesh.ufl_cell(), k) e2 =", "== 'upwind': w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0 switch =", "mesh.ufl_cell(), k - 2) e3 = FiniteElement('Bubble', mesh.ufl_cell(), 3) em = MixedElement([e1, e2,", "= LocalSolver(a, L) self.local_solver.factorize() self.temp_function = Function(V) self.w = w # Create function", "# L += 0 else: L += dot(w, n) * v1 * ds", "mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n = 
FacetNormal(mesh) # The", "self._setup_dg2_projection_2D(w, incompressibility_flux_type, D12, use_bcs) else: raise NotImplementedError( 'VelocityBDMProjection does not support ' 'degree", "L, V = self._setup_projection_nedelec( w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim ) elif gdim", "D12=None, degree=None, use_bcs=True, use_nedelec=True, ): \"\"\" Implement equation 4a and 4b in \"Two", "1) em = MixedElement([e1, e2]) W = FunctionSpace(mesh, em) v1, v2 = TestFunctions(W)", "= w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) W = FunctionSpace(mesh, 'DGT', k) n", "v1(R) * dS L += dot(u_hat_dS, n(R)) * v1(R) * dS # Eq.", "== 'central': u_hat_dS = dolfin.avg(w) elif incompressibility_flux_type == 'upwind': w_nU = (dot(w, n)", "DG2 function \"\"\" sim = self.simulation k = 2 gdim = 2 mesh", "ϕ) = (w, ϕ) ∀ φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ =", "2 and pdeg == 2: a, L, V = self._setup_dg2_projection_2D(w, incompressibility_flux_type, D12, use_bcs)", "n[d] * v1 * dbc.ds() for nbc in neumann_bcs + robin_bcs + outlet_bcs:", "shape a += dot(u, v2) * dx L += dot(w, v2) * dx", "D12, use_bcs) elif gdim == 2 and pdeg == 2: a, L, V", "function assigners self.assigners = [] for i in range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def _setup_dg1_projection_2D(self,", "0 else: L += dot(w, n) * v1 * ds # Equation 2", "* ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d,", "the space of polynomials of order k The flux type can be 'central'", "em) v1, v2 = TestFunctions(W) u = TrialFunction(V) # The same fluxes that", "v2, v3b = TestFunctions(W) u = TrialFunction(V) # The same fluxes that are", "sim.data['robin_bcs'].get('u%d' % d, []) outlet_bcs = sim.data['outlet_bcs'] for dbc in dirichlet_bcs: u_bc =", "and pdeg > 1: a, L, V = self._setup_projection_nedelec( w, incompressibility_flux_type, D12, use_bcs,", "can be 'central' or 'upwind' \"\"\" self.simulation = simulation simulation.log.info(' Setting up velocity", "+ robin_bcs + outlet_bcs: if nbc.enforce_zero_flux: pass # L += 0 else: L", "degree Vout = FunctionSpace(V.mesh(), 'DG', degree) pg = (pdeg, gdim) assert ue.family() ==", "'DG', k) W = FunctionSpace(mesh, 'DGT', k) n = FacetNormal(mesh) v1 = TestFunction(W)", "to w w = self.w if w is None else w U =", "'central' or 'upwind' \"\"\" self.simulation = simulation simulation.log.info(' Setting up velocity BDM projection')", "+= dolfin.Constant([D12, D12]) * dolfin.jump(w, n) # Equation 1 - flux through the", "L += dot(w, v2) * dx # Equation 3 - BDM Phi v3", "for dbc in dirichlet_bcs: u_bc = dbc.func() L += u_bc * n[d] *", "assigners self.assigners = [] for i in range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def _setup_dg1_projection_2D(self, w,", "= self.simulation k = pdeg mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k)", "the projection test functions e1 = FiniteElement('DGT', mesh.ufl_cell(), k) e2 = FiniteElement('N1curl', mesh.ufl_cell(),", "and store for usage in projection self.local_solver = LocalSolver(a, L) self.local_solver.factorize() self.temp_function =", "nbc.ds() for sbc in sim.data['slip_bcs'].get('u', []): pass # L += 0 else: L", "is the flux at each face. 
P_{x} is the space of polynomials of", "k) W = FunctionSpace(mesh, 'DGT', k) n = FacetNormal(mesh) v1 = TestFunction(W) u", "'upwind') if use_nedelec and pdeg > 1: a, L, V = self._setup_projection_nedelec( w,", "flux through external boundaries a += dot(u, n) * v1 * ds if", "Setting up velocity BDM projection') V = w[0].function_space() ue = V.ufl_element() gdim =", "def run(self, w=None): \"\"\" Perform the projection based on the current state of", "1.0, 0.0) u_hat_dS = switch * w('+') + (1 - switch) * w('-')", "in neumann_bcs + robin_bcs + outlet_bcs: if nbc.enforce_zero_flux: pass # L += 0", "n))) / 2.0 switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0) u_hat_dS = switch *", "w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n = FacetNormal(mesh) # The mixed function", "incompressibility_flux_type, D12, use_bcs): \"\"\" Implement the projection where the result is BDM embeded", "incompressibility_flux_type in ('central', 'upwind') if use_nedelec and pdeg > 1: a, L, V", "for DG1 # Equation 3 - BDM Phi : empty for DG1 return", "DG2 space and û is the flux at each face. P_{x} is the", "n) * v1 * ds # Equation 2 - internal shape a +=", "φ> ∀ ϕ ∈ P_{k}(F) for any face F ∈ ∂K (u, ϕ)", "w, incompressibility_flux_type='central', D12=None, degree=None, use_bcs=True, use_nedelec=True, ): \"\"\" Implement equation 4a and 4b", "import dot, as_vector, dx, dS, ds, LocalSolver class VelocityBDMProjection: def __init__( self, simulation,", "2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) W = FunctionSpace(mesh, 'DGT',", "'DGT', k) n = FacetNormal(mesh) v1 = TestFunction(W) u = TrialFunction(V) # The", "a += dot(u(R), n(R)) * v1(R) * dS L += dot(u_hat_dS, n(R)) *", "in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])", "gdim ) elif gdim == 2 and pdeg == 1: a, L, V", "k = 1 gdim = 2 mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG',", "= VectorFunctionSpace(mesh, 'DG', k) W = FunctionSpace(mesh, 'DGT', k) n = FacetNormal(mesh) v1", "elif gdim == 2 and pdeg == 2: a, L, V = self._setup_dg2_projection_2D(w,", "k) e2 = VectorElement('DG', mesh.ufl_cell(), k - 2) e3 = FiniteElement('Bubble', mesh.ufl_cell(), 3)", "= 0 for R in '+-': a += dot(u(R), n(R)) * v1(R) *", "V def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs): \"\"\" Implement the projection where the", "* v1 * ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d'", "in the mesh: <u⋅n, φ> = <û⋅n, φ> ∀ ϕ ∈ P_{k}(F) for", "u_hat_dS = dolfin.avg(w) elif incompressibility_flux_type == 'upwind': w_nU = (dot(w, n) + abs(dot(w,", "else: pdeg = degree Vout = FunctionSpace(V.mesh(), 'DG', degree) pg = (pdeg, gdim)", "1: a, L, V = self._setup_projection_nedelec( w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim )", "= as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of [0, 0, v3b] a += dot(u, v3)", "dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d, []) neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs", "d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])", "* v1 * ds # Equation 2 - internal shape a += dot(u,", "if D12 is not None: u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n) #", "= 0 in K, ϕ⋅n = 0 on ∂K} Here w is the", "if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs =", "= [] for i in 
range(gdim): self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i))) def _setup_dg1_projection_2D(self, w, incompressibility_flux_type, D12,", "v1 * ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' %", "the projection based on the current state of the Function w \"\"\" #", "= V.ufl_element() gdim = w.ufl_shape[0] if degree is None: pdeg = ue.degree() Vout", "TestFunctions, Function from dolfin import dot, as_vector, dx, dS, ds, LocalSolver class VelocityBDMProjection:", "internal shape : empty for DG1 # Equation 3 - BDM Phi :", "is the space of polynomials of order k The flux type can be", "neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d' % d, []) outlet_bcs", "# Equation 2 - internal shape : empty for DG1 # Equation 3", "use_bcs, pdeg, gdim): \"\"\" Implement the BDM-like projection using Nedelec elements in the", "is not None: u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n) # Equation", "are used in the incompressibility equation if incompressibility_flux_type == 'central': u_hat_dS = dolfin.avg(w)", "+= w[d] * n[d] * v1 * nbc.ds() for sbc in sim.data['slip_bcs'].get('u', []):", "= self._setup_projection_nedelec( w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim ) elif gdim == 2", "V = VectorFunctionSpace(mesh, 'DG', k) n = FacetNormal(mesh) # The mixed function space", "W = FunctionSpace(mesh, em) v1, v2, v3b = TestFunctions(W) u = TrialFunction(V) #", "robin_bcs = sim.data['robin_bcs'].get('u%d' % d, []) outlet_bcs = sim.data['outlet_bcs'] for dbc in dirichlet_bcs:", "L += dot(w, n) * v1 * ds # Equation 2 - internal", "(u, ϕ) = (w, ϕ) ∀ φ ∈ P_{k-2}(K)^2 (u, ϕ) = (w,", "+= dot(u, n) * v1 * ds if use_bcs: for d in range(gdim):", "TestFunctions(W) u = TrialFunction(V) # The same fluxes that are used in the", "= sim.data['outlet_bcs'] for dbc in dirichlet_bcs: u_bc = dbc.func() L += u_bc *", "3 - BDM Phi v3 = as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of [0, 0,", "incompressibility_flux_type, D12, use_bcs, pdeg, gdim): \"\"\" Implement the BDM-like projection using Nedelec elements", "* dbc.ds() for nbc in neumann_bcs + robin_bcs + outlet_bcs: if nbc.enforce_zero_flux: pass", "space of polynomials of order k The flux type can be 'central' or", "velocity self.local_solver.solve_local_rhs(self.temp_function) # Assign to w w = self.w if w is None", "k = pdeg mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n =", "projection based on the current state of the Function w \"\"\" # Find", "BDM Phi : empty for DG1 return a, L, V def _setup_dg2_projection_2D(self, w,", "TrialFunction(V) # The same fluxes that are used in the incompressibility equation if", "+= dot(u(R), n(R)) * v1(R) * dS L += dot(u_hat_dS, n(R)) * v1(R)", "and dimension %d' % pg ) # Pre-factorize matrices and store for usage", "self._setup_projection_nedelec( w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim ) elif gdim == 2 and", "LocalSolver(a, L) self.local_solver.factorize() self.temp_function = Function(V) self.w = w # Create function assigners", "2 - internal shape : empty for DG1 # Equation 3 - BDM", "Equation 2 - internal shape using 'Nedelec 1st kind H(curl)' elements a +=", "simulation, w, incompressibility_flux_type='central', D12=None, degree=None, use_bcs=True, use_nedelec=True, ): \"\"\" Implement equation 4a and", "sim = self.simulation k = 2 gdim = 2 mesh = w[0].function_space().mesh() V", "if use_bcs: for d in range(gdim): dirichlet_bcs = 
sim.data['dirichlet_bcs'].get('u%d' % d, []) neumann_bcs", "in \"Two new techniques for generating exactly incompressible approximate velocities\" by <NAME> (2009)", "2 - internal shape using 'Nedelec 1st kind H(curl)' elements a += dot(u,", "= MixedElement([e1, e2]) W = FunctionSpace(mesh, em) v1, v2 = TestFunctions(W) u =", ") # Pre-factorize matrices and store for usage in projection self.local_solver = LocalSolver(a,", "gdim = w.ufl_shape[0] if degree is None: pdeg = ue.degree() Vout = V", "is the input velocity function in DG2 space and û is the flux", "for generating exactly incompressible approximate velocities\" by <NAME> (2009) For each element K", "import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace from dolfin import FacetNormal, TrialFunction, TestFunction, TestFunctions,", "* v1 * nbc.ds() for sbc in sim.data['slip_bcs'].get('u', []): pass # L +=", "): \"\"\" Implement equation 4a and 4b in \"Two new techniques for generating", "as_vector, dx, dS, ds, LocalSolver class VelocityBDMProjection: def __init__( self, simulation, w, incompressibility_flux_type='central',", "simulation.log.info(' Setting up velocity BDM projection') V = w[0].function_space() ue = V.ufl_element() gdim", "projection self.local_solver = LocalSolver(a, L) self.local_solver.factorize() self.temp_function = Function(V) self.w = w #", "e1 = FiniteElement('DGT', mesh.ufl_cell(), k) e2 = VectorElement('DG', mesh.ufl_cell(), k - 2) e3", "v2 = TestFunctions(W) u = TrialFunction(V) # The same fluxes that are used", "1 cont. - flux through external boundaries a += dot(u, n) * v1", "L += 0 else: L += dot(w, n) * v1 * ds #", "= (w, ϕ) ∀ φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ = 0", "1 - flux through the sides a = L = 0 for R", "v1 * dbc.ds() for nbc in neumann_bcs + robin_bcs + outlet_bcs: if nbc.enforce_zero_flux:", "pdeg mesh = w[0].function_space().mesh() V = VectorFunctionSpace(mesh, 'DG', k) n = FacetNormal(mesh) #", "D12]) * dolfin.jump(w, n) # Equation 1 - flux through the sides a", "w[0].function_space() ue = V.ufl_element() gdim = w.ufl_shape[0] if degree is None: pdeg =", "dot(w, v2) * dx # Equation 3 - BDM Phi v3 = as_vector([v3b.dx(1),", "exactly incompressible approximate velocities\" by <NAME> (2009) For each element K in the", "for nbc in neumann_bcs + robin_bcs + outlet_bcs: if nbc.enforce_zero_flux: pass # L", "result is BDM embeded in a DG2 function \"\"\" sim = self.simulation k", "mesh.ufl_cell(), 3) em = MixedElement([e1, e2, e3]) W = FunctionSpace(mesh, em) v1, v2,", "Implement the projection where the result is BDM embeded in a DG1 function", "- 2) e3 = FiniteElement('Bubble', mesh.ufl_cell(), 3) em = MixedElement([e1, e2, e3]) W", "for R in '+-': a += dot(u(R), n(R)) * v1(R) * dS L", "[]) outlet_bcs = sim.data['outlet_bcs'] for dbc in dirichlet_bcs: u_bc = dbc.func() L +=", "# SPDX-License-Identifier: Apache-2.0 import dolfin from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace", "== 1: a, L, V = self._setup_dg1_projection_2D(w, incompressibility_flux_type, D12, use_bcs) elif gdim ==", "0.0), 1.0, 0.0) u_hat_dS = switch * w('+') + (1 - switch) *", "for any face F ∈ ∂K (u, ϕ) = (w, ϕ) ∀ φ", "ds if use_bcs: for d in range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs", "F ∈ ∂K (u, ϕ) = (w, ϕ) ∀ φ ∈ P_{k-2}(K)^2 (u,", "0 for R in '+-': a += dot(u(R), n(R)) * v1(R) * dS", "ue.family() == 'Discontinuous Lagrange' assert incompressibility_flux_type in ('central', 'upwind') if use_nedelec and pdeg", "dot(u, n) 
* v1 * ds if use_bcs: for d in range(gdim): dirichlet_bcs", "dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0) u_hat_dS = switch * w('+') + (1 - switch)", "+= dot(u, v3) * dx L += dot(w, v3) * dx return a,", "W = FunctionSpace(mesh, em) v1, v2 = TestFunctions(W) u = TrialFunction(V) # The", "- switch) * w('-') if D12 is not None: u_hat_dS += dolfin.Constant([D12] *", "incompressibility_flux_type, D12, use_bcs) elif gdim == 2 and pdeg == 2: a, L,", "dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs =", "through external boundaries a += dot(u, n) * v1 * ds if use_bcs:", "None: u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n) # Equation 1 -", "range(gdim): dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d] neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs", "None: u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n) # Equation 1 - flux", "velocity BDM projection') V = w[0].function_space() ue = V.ufl_element() gdim = w.ufl_shape[0] if", "* w('+') + (1 - switch) * w('-') if D12 is not None:", "equation 4a and 4b in \"Two new techniques for generating exactly incompressible approximate", "dolfin import dot, as_vector, dx, dS, ds, LocalSolver class VelocityBDMProjection: def __init__( self,", "mesh.ufl_cell(), k) e2 = VectorElement('DG', mesh.ufl_cell(), k - 2) e3 = FiniteElement('Bubble', mesh.ufl_cell(),", "sim.data['dirichlet_bcs'].get('u%d' % d, []) neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, []) robin_bcs = sim.data['robin_bcs'].get('u%d'", "ue = V.ufl_element() gdim = w.ufl_shape[0] if degree is None: pdeg = ue.degree()", "in '+-': a += dot(u(R), n(R)) * v1(R) * dS L += dot(u_hat_dS,", "order k The flux type can be 'central' or 'upwind' \"\"\" self.simulation =" ]
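The class above assembles the projection as a cell-local linear system, solves it with dolfin's LocalSolver, and copies each component of the result into the output functions with FunctionAssigner. The sketch below is not part of the Ocellaris module; it is a minimal, self-contained illustration of those two mechanisms on a plain cell-local L2 projection. The mesh, the polynomial degree and the velocity field are illustrative assumptions, and legacy FEniCS/dolfin must be installed.

import dolfin
from dolfin import (UnitSquareMesh, VectorFunctionSpace, FunctionSpace, Function,
                    FunctionAssigner, Expression, interpolate, TrialFunction,
                    TestFunction, dot, dx, LocalSolver)

mesh = UnitSquareMesh(8, 8)
V = VectorFunctionSpace(mesh, 'DG', 1)   # stands in for the projection space
Vout = FunctionSpace(mesh, 'DG', 1)      # one scalar output component

# Some velocity field to project (illustrative)
w = interpolate(Expression(('x[1]', '-x[0]'), degree=1), V)

# A simple cell-local L2 projection a(u, v) = L(v); the class above uses the
# flux / interior-shape system instead, but the LocalSolver usage is identical
u, v = TrialFunction(V), TestFunction(V)
a = dot(u, v) * dx
L = dot(w, v) * dx

solver = LocalSolver(a, L)
solver.factorize()               # pre-factorize the per-cell matrices once
u_proj = Function(V)
solver.solve_local_rhs(u_proj)   # element-by-element solve, reusing the factorization

# Copy one component into a scalar DG function, as run() does per dimension
assigner = FunctionAssigner(Vout, V.sub(0))
u0 = Function(Vout)
assigner.assign(u0, u_proj.split()[0])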
[ "('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6,", "a rep said they were going to send him the correct one. They", "{\"fan-boy\":[\"fan-boy\"],\"Amazon\":[\"Amazon\",\"amazon\",\"AMZ\"], \"question\":[\"question\"], \"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"], \"price\":[\"prices\", \"price\"],} sentencias = nltk.sent_tokenize(com) dic_resultado =", "{\"example\": [\"example\"]} indice_raiz = 6 indice_nodo = 1 res_esperado = None res =", "\"amod\". Se espera una tupla (\"comment\", \"valid\") \"\"\" indice_raiz = 5 indice_nodo =", "self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta", "test__amod_3(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra", "6, 1), ('cop', 6, 2), ('neg', 6, 3), ('det', 6, 4), ('amod', 6,", "test__conj_1(self): \"\"\" Método aúxiliar para manejar las conjunciones de un sustantivo a un", "que elimine toda palabra que no tenga una etiqueta POS de adverbio, sustantivo", "electric sheep, lately? indice_raiz = 7 indice_nodo = 6 lista_pos_lem = [('do', 'VB',", "self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_7(self):", "None)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"cyclone\",", "= 3 lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),", "to Amazon warehouses. They can't ship what's not available. Nice way to save", "sea determinado como aspecto 'comment'. \"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\", \"review\"]}", "[('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod',", "If AMZ is good at one thing these days, it is finding new", "('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos", "feature on an item I bought on AMZ, but cannot ask the question", "Today I had the simplest question about a feature on an item I", "test__nsub_1(self): \"\"\" Prueba el método auxiliar que busca sujetos nominales. Debe encontrar el", "la palabra 'comment' sea determinado como aspecto 'comment'. \"\"\" palabra = 'comment' diccionario", "indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = None self.assertEqual(res, res_esperado) def test__amod_5(self): \"\"\" Prueba", "from vendors warehousing to Amazon warehouses. They can't ship what's not available. Nice", "{\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res)", "el diccionario de aspectos. 
Se espera que la palabra 'review' sea determinado como", "texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self, dict1, dict2): for", "1), ('det', 4, 2), ('amod', 4, 3), ('dobj', 1, 4), ('punct', 1, 5)]", "('lately', 'RB', None), ('?', '.', None)] arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3,", "\"\"\" com = \"this is not a good example.\" diccionario = {\"example\":[\"example\"]} arbol", "una tupla (\"comment\", \"valid\") \"\"\" indice_raiz = 5 indice_nodo = 4 lista_pos_lem =", "lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado el siguiente comentario: i really love", "lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado)", "rep said they were going to send him the correct one. They sent", "import unittest from extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador import", "to save a buck. But keep taking our membership money for services you", "more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el metodo quitar_palabras. Se", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_4(self): \"\"\" Dado el siguiente", "'be' self.assertEqual(resultado, resultado_esperado) def test__buscar_en_tupla_pos_lem_2(self): \"\"\" Prueba el método auxiliar que es usado", "3), ('det', 6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8),", "= None res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self):", "dic_resultado) def test_extractor_2(self): \"\"\" Dado el siguiente comentario: im the red cyclone. Debe", "'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el método auxiliar que es usado", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado el siguiente", "'DT', None), ('is', 'VBZ', None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ',", "for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)", "self.ex.quitar_palabras(texto) texto_esperado = \"black really cute\" self.assertEqual(res, texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar", "indice_nodo = 3 res_esperado = (\"example\", \"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos,", "1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)]", "} self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\" Pruebas con comentarios reales \"\"\" com =", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"comment\", \"valid\") self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\"", "\"\"\" Dado el siguiente comentario: black cats are really cute. 
Debe devolver {\"cats\":[\"black\",\"", "lately?\" res = self.ex.quitar_palabras(texto) texto_esperado = \"perfectly electric lately\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_2(self):", "espera que al encontrar una dependencia amod que tiene su propio advmod, se", "levante una excepcion si no recibe el arbol de aspectos en fora de", "= self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no never", "days, it is finding new and innovated ways to anger their customers. I", "com = \"They sent him the same, wrong item.\" diccionario = {\"item\":[\"item\", \"items\"]}", "res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self):", "and tell me their policy has not changed. \\\"Two day shipping starts when", "6 indice_nodo = 22 res_esperado = (\"Member\", \"no more\") res = self.ex._extraer_conj(indice_raiz, indice_nodo,", "buscar el lema o la palabra de una tupla pos_lem dado una posición.", "6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8), ('cc', 6,", "self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"experience\":[\"good\"], \"Amazon\":[], \"item\":[\"same\",\"wrong\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\"", "res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res)", "= \"i am a valid comment.\" diccionario = {\"comment\":[\"comment\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos", "arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3), ('dobj', 1,", "'NN', None), (',', ',', None), ('but', 'CC', None), ('no', 'DT', None), ('more', 'JJR',", "com = \"ultimately, it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos", "companies to move merchandise from vendors warehousing to Amazon warehouses. They can't ship", "So he had 2 returns to do.\" diccionario = {\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]} sentencias =", "= [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None),", "deal with products all the time and use what discounts where I can.", "quitar_palabras. Se espera que elimine toda palabra que no tenga una etiqueta POS", "'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res =", "arbol_de_dependencias=arbol_de_dependencias) res_esperado = None self.assertEqual(res, res_esperado) def test__amod_5(self): \"\"\" Prueba el método auxiliar", "= 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self):", "doesn't have Prime and isn't really internet savvy. 
After he had bought a", "7 indice_nodo = 6 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream',", "= self.ex.extraer(diccionario, arbol, lista_pos_lem) print(diccionario_esperado) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self): \"\"\" Pruebas con comentarios", "test__unir_palabras(self): \"\"\" Método auxiliar que une las palabras de la lista de tuplas.", "are really cute. Debe devolver {\"cats\":[\"black\",\" really cute\"]} \"\"\" com = \"black cats", "diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = None", "None), ('Prime', 'JJ', None), ('member', 'NN', None), ('for', 'IN', None), ('years', 'NNS', None),", "res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) self.assertEqual(res,res_esperado) def test__nsub_1(self): \"\"\" Prueba el", "= \"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba", "palabra esta en el diccionario de aspectos. Se espera que la palabra 'review'", "do.\" diccionario = {\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia", "4), ('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz =", "4, 3), ('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res =", "and has taken to locking people out of their ability to comment on", "None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),", "del sustantivos 'example'. Se espera que devuelva ('example','not'). \"\"\" lista_pos_lem = [('this', 'DT',", "never again\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba el metodo quitar_palabras. Se espera", "= \"i really love black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com)", "in dict1: if llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] = dict1[llave] return dict2", "arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el siguiente comentario: i am a", "def test__unir_palabras(self): \"\"\" Método auxiliar que une las palabras de la lista de", "'NN', 'comment'), ('.', '.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem,", "espera que elimine toda palabra que no tenga una etiqueta POS de adverbio,", "res_esperado = (\"cats\", \"really cute\") res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado,", "busca sujetos nominales. Como el sujeto nominas no va de un adjetivo a", "a print out label for my roommate since he doesn't have Prime and", "('no', 'DT', None), ('more', 'JJR', None), ('!!', '.', None)] arbol_de_dependencias = [('ROOT', 0,", "perfectly electric sheep, lately? indice_raiz = 7 indice_nodo = 6 lista_pos_lem = [('do',", "va de un adjetivo a un sustantivo, debe regresar None. 
\"\"\" lista_pos_lem =", "encontrar una dependencia amod que tiene su propio advmod, se devuelvan ambos en", "electric sheep, lately?\" diccionario = {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com)", "perfectly electric sheep, lately? Debe devolver {\"dream\":[\"dream\"],\"sheep\":[\"sheep\"]} \"\"\" com = \"do you dream", "dado una posición. Se espera que de la tupla en la posición 1,", "Test(unittest.TestCase): def setUp(self): self.ex = extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas = lematizador.Lematizador() def", "posición. Se espera que de la tupla en la posición 1, devuelve el", "past. I have been a Prime member for years and always received my", "def test_quitar_palabras(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda palabra", "sustantivo o negacion. \"\"\" texto = \"don't say no to cookies, never again\"", "else: dict2[llave] = dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar() if __name__ ==", "('my', 'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None), ('desired',", "extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador import lematizador import nltk class Test(unittest.TestCase):", "se encuentra una dependencia con la etiqueta \"advmod\". Se espera que regrese el", "diccionario_esperado = {\"fan-boy\":[\"super-Amazon\"], \"Amazon\":[\"good\"], \"question\":[\"simple\"], \"thing\":[\"good\"], \"way\":[\"new\"], \"deal\":[\"best\"], \"price\":[\"top\"] } self.assertEqual(diccionario_esperado, dic_resultado) def", "self.ex.quitar_palabras(texto) texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba", "= {\"example\":[\"example\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado =", "Debe encontrar el advmod del adjetivo 'electric'. Se espera que devuelva 'perfectly'. \"\"\"", "dream of perfectly electric sheep, lately? indice_raiz = 7 indice_nodo = 6 lista_pos_lem", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_7(self): \"\"\" Dado el siguiente comentario: this is not a", "have good experiences with Amazon and its customer service reps, but after todays", "electric sheep, lately?\" res = self.ex.quitar_palabras(texto) texto_esperado = \"perfectly electric lately\" self.assertEqual(res, texto_esperado)", "self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self, dict1, dict2):", "test__buscar_en_tupla_pos_lem_2(self): \"\"\" Prueba el método auxiliar que es usado para buscar el lema", "(',', ',', None), ('lately', 'RB', None), ('?', '.', None)] arbol_de_dependencias = [('ROOT', 0,", "una dependencia con la etiqueta \"advmod\". Se espera que regrese el adverbio del", "and innovated ways to anger their customers. 
I try to find the best", "None)] diccionario_de_aspectos = {\"sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado =", "Apparently, AMZ does not like this and has taken to locking people out", "8), ('cc', 6, 9), ('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12),", "chat I am horrified at some of the people Amazon employs. Enter employee", "dic_resultado) def test__conj_1(self): \"\"\" Método aúxiliar para manejar las conjunciones de un sustantivo", "question about a feature on an item I bought on AMZ, but cannot", "never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\", 'RB', \"n't\"),('no',", "\"thing\":[\"good\"], \"way\":[\"new\"], \"deal\":[\"best\"], \"price\":[\"top\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test__conj_1(self): \"\"\" Método aúxiliar para", "en la posición 1, devuelve el lema 'be'. \"\"\" tupla_pos_lem = [('i', 'LS',", "tuplas. \"\"\" texto = \"don't say no to cookies, never again\" lista_pos_lem =", "discounts where I can. Apparently, AMZ does not like this and has taken", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En", "self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "('it', 'PRP', None), (\"'s\", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)] diccionario_de_aspectos", "self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el siguiente comentario: i am a valid", "None), ('my', 'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None),", "com = \"do you dream of perfectly electric sheep, lately?\" diccionario = {\"dream\":[\"dream\"],", "pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el método auxiliar que es usado para buscar", "they were going to send him the correct one. They sent him the", "dict2): for llave in dict1: if llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] =", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self): \"\"\" Dado el siguiente", "taking our membership money for services you no longer can provide.\" diccionario =", "lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado", "el metodo quitar_palabras. Se espera que elimine toda palabra que no tenga una", "\"price\":[\"top\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test__conj_1(self): \"\"\" Método aúxiliar para manejar las conjunciones", "this is not a good example. Debe devolver {\"example\":[\"not good\"]} \"\"\" com =", "pasar como argumento el arbol de dependencias que resuelve el Stanford CoreNLP. 
Prueba", "= 9 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),", "self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado,", "save money, Amazon no longer uses reliable trucking companies to move merchandise from", "available. Nice way to save a buck. But keep taking our membership money", "tupla en la posición 1, devuelve el lema 'be'. \"\"\" tupla_pos_lem = [('i',", "0, 1), ('det', 4, 2), ('amod', 4, 3), ('dobj', 1, 4), ('punct', 1,", "going to send him the correct one. They sent him the same, wrong", "I got the product with a discount via research on the net.\" diccionario", "of the people Amazon employs. Enter employee Ruchitha. I was trying to get", "de dependencias que resuelve el Stanford CoreNLP. Prueba que el método extraer levante", "'RB', None), ('?', '.', None)] diccionario_de_aspectos = {\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo,", "test__neg_1(self): \"\"\" Prueba el método auxiliar que busca negaciones. Debe encontrar la negacion", "self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no never again\"", "im the red cyclone. Debe devolver {\"cyclone\":[\"red\"]} \"\"\" com = \"im the red", "def setUp(self): self.ex = extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas = lematizador.Lematizador() def test_extractor_recibe_arbol_de_dependencias(self):", "negacion del sustantivos 'example'. Se espera que devuelva ('example','not'). \"\"\" lista_pos_lem = [('this',", "2), ('amod', 4, 3), ('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"not\":[\"ok\"]}", "They sent him the same, wrong item. Debe devolver {\"item\":[\"same\",\"wrong\"]} \"\"\" com =", "a good example. Debe devolver {\"example\":[\"not good\"]} \"\"\" com = \"this is not", "import nltk class Test(unittest.TestCase): def setUp(self): self.ex = extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas", "('compound', 18, 17), ('nmod', 11, 18), ('punct', 6, 19), ('cc', 6, 20), ('neg',", "indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\"", "4), ('punct', 5, 6)] diccionario_de_aspectos = {\"cats\":[\"cats\"]} indice_raiz = 5 indice_nodo = 2", "really cute.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "\"\"\" com = \"Prime 2 day shipping seems to be a thing of", "sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia)", "= \"do you dream of perfectly electric sheep, lately?\" diccionario = {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]}", "adverbio del sustantivo 'cats'. Se espera que devuelva ('cats', \"really cute\"). 
\"\"\" lista_pos_lem", "com = \"i really love black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol =", "\"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba el", "'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None), ('.', '.',", "lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\", 'RB', \"n't\"),('no', 'DT', None),", "\"\"\" com = \"i am a valid comment.\" diccionario = {\"comment\":[\"comment\"]} arbol =", "lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\" Prueba el", "devuelva ('example','not'). \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB',", "= [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None), ('a', 'DT', None),", "advmod del adjetivo 'electric'. Se espera que devuelva 'perfectly'. \"\"\" indice_nodo = 6", "3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6, 5),", "'.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado =", "indice_raiz = 5 indice_nodo = 2 res_esperado = (\"cats\", \"really cute\") res =", "que devuelva ('example','not'). \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not',", "una tupla (\"cyclone\", \"red\") \"\"\" indice_raiz = 4 indice_nodo = 3 lista_pos_lem =", "'RB', \"n't\"),('no', 'DT', None), ('never', 'RB', \"never\"), ('again', 'RB', \"again\")] self.assertEqual(res, tupla_esperada) def", "tupla_esperada) def test__unir_palabras(self): \"\"\" Método auxiliar que une las palabras de la lista", "try to find the best deal with products all the time and use", "dic_resultado) def test_extractor_3(self): \"\"\" Dado el siguiente comentario: do you dream of perfectly", "que busca sujetos nominales. Como el sujeto nominas no va de un adjetivo", "seems to be a thing of the past. I have been a Prime", "cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem =", "this and has taken to locking people out of their ability to comment", "test__es_aspecto_3(self): \"\"\" Prueba el método auxiliar que es usado para determinar si una", "('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately',", "6 indice_nodo = 1 res_esperado = None res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,", "'valid' del aspecto 'comment' \"\"\" com = \"i am a valid comment.\" diccionario", "None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] indice = 1", "day shipping seems to be a thing of the past. I have been", "me the runaround and tell me their policy has not changed. \\\"Two day", "again\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba el metodo quitar_palabras. 
Se espera que", "None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz,", "res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba el método", "products all the time and use what discounts where I can. Apparently, AMZ", "Se espera que regrese el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia').", "had the simplest question about a feature on an item I bought on", "a Prime member for years and always received my merchandise in the desired", "= self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_2(self): \"\"\" Prueba el método auxiliar que", "None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)]", "aspectos. Se espera que la palabra 'comment' sea determinado como aspecto 'comment'. \"\"\"", "ship if the items are not in their warehouses, seemly blaming the vendors.", "('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep',", "self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self):", "el diccionario de aspectos. Se espera que la palabra 'comment' sea determinado como", "('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6 indice_nodo = 3", "devolver {\"cats\":[\"black\"} \"\"\" com = \"i really love black cats.\" diccionario = {\"cats\":[\"cat\",", "\"Usually I have good experiences with Amazon and its customer service reps, but", "'.', None)] arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),", "= (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "no to cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada =", "cute\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado el", "comment. Debe devolver el adjetivo 'valid' del aspecto 'comment' \"\"\" com = \"i", "self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado", "self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado el siguiente comentario: i", "black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "('comment', 'NN', 'comment'), ('.', '.', None)] indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem)", "via research on the net.\" diccionario = {\"fan-boy\":[\"fan-boy\"],\"Amazon\":[\"Amazon\",\"amazon\",\"AMZ\"], \"question\":[\"question\"], \"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"],", "o la palabra de una tupla pos_lem dado una posición. 
Se espera que", "= (\"comment\", \"valid\") self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None), ('of', 'IN',", "diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método", "[('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really', 'RB', None), ('cute',", "None self.assertEqual(res, res_esperado) def test__amod_4(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se", "None), ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT',", "def test__es_aspecto_2(self): \"\"\" Prueba el método auxiliar que es usado para determinar si", "tupla_esperada = [(\"n't\", 'RB', \"n't\"),('no', 'DT', None), ('never', 'RB', \"never\"), ('again', 'RB', \"again\")]", "employee Ruchitha. I was trying to get a print out label for my", "Prime and isn't really internet savvy. After he had bought a dvd that", "com = \"black cats are really cute.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol =", "print(diccionario_esperado) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self): \"\"\" Pruebas con comentarios reales \"\"\" com =", "5)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado =", "test_extractor_2(self): \"\"\" Dado el siguiente comentario: im the red cyclone. Debe devolver {\"cyclone\":[\"red\"]}", "{\"cats\":[\"cats\"]} indice_raiz = 5 indice_nodo = 2 res_esperado = (\"cats\", \"really cute\") res", "6, 9), ('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12), ('dobj', 11,", "determinado como aspecto 'comment'. \"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\"]} resultado =", "('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo,", "None)] diccionario_de_aspectos = {\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado =", "dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda", "se ejecuta cuando se encuentra una dependencia con la etiqueta \"advmod\". Se espera", "(\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En algunas ocaciones, adjetivos de un sustantivo", "innecesarias. \"\"\" texto = \"don't say no to cookies, never again\" lista_pos_lem =", "= {\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el", "('.', '.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado", "como aspecto 'comment'. 
\"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra,", "= extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas = lematizador.Lematizador() def test_extractor_recibe_arbol_de_dependencias(self): \"\"\" Para poder", "} self.assertEqual(diccionario_esperado, dic_resultado) def test__conj_1(self): \"\"\" Método aúxiliar para manejar las conjunciones de", "= self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)", "el siguiente comentario: this is not a good example. Debe devolver {\"example\":[\"not good\"]}", "perfectly electric sheep, lately? indice_raiz = 3 indice_nodo = 9 lista_pos_lem = [('do',", "None), ('and', 'CC', None), ('always', 'RB', None), ('received', 'VBD', None), ('my', 'PRP$', None),", "\"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really", "electric sheep, lately? Debe devolver {\"dream\":[\"dream\"],\"sheep\":[\"sheep\"]} \"\"\" com = \"do you dream of", "al encontrar una dependencia amod que tiene su propio advmod, se devuelvan ambos", "\"don't say no to cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem)", "('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?',", "Debe devolver {\"cyclone\":[\"red\"]} \"\"\" com = \"im the red cyclone.\" diccionario = {\"cyclone\":[\"cyclone\"]}", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado el siguiente comentario: i really love black", "7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3, 10)] diccionario_de_aspectos = {\"Sheep\":", "diccionario = dict() arbol = None pos_lem = list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario,", "a buck. But keep taking our membership money for services you no longer", "POS de adverbio, sustantivo o negacion. \"\"\" texto = \"don't say no to", "def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el método auxiliar que es usado para buscar el", "el siguiente comentario: black cats are really cute. Debe devolver {\"cats\":[\"black\",\" really cute\"]}", "de adverbio, sustantivo o negacion. \"\"\" texto = \"don't say no to cookies,", "def test__es_aspecto_3(self): \"\"\" Prueba el método auxiliar que es usado para determinar si", "{\"item\":[\"same\",\"wrong\"]} \"\"\" com = \"They sent him the same, wrong item.\" diccionario =", "con comentarios reales \"\"\" com = \"Usually I have good experiences with Amazon", "nominas no va de un adjetivo a un sustantivo, debe regresar None. \"\"\"", "tiene su propio advmod, se devuelvan ambos en un solo string. Se espera", "from extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador import lematizador import", "apparently, I am persona non grata these days. 
I got the product with", "self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"fan-boy\":[\"super-Amazon\"], \"Amazon\":[\"good\"], \"question\":[\"simple\"], \"thing\":[\"good\"], \"way\":[\"new\"], \"deal\":[\"best\"], \"price\":[\"top\"] } self.assertEqual(diccionario_esperado,", "self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado,", "1, 5)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado =", "= self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado)", "lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None), ('a', 'DT',", "elimine toda palabra que no tenga una etiqueta POS de adverbio, sustantivo o", "not paying the top price. Today I had the simplest question about a", "list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el método", "test__amod_5(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra", "None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)] diccionario_de_aspectos = {\"Dream\":", "= self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba el método auxiliar", "diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6 indice_nodo = 3 res_esperado = (\"example\",", "None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz,", "no sea determinado como aspecto y devuelva None. \"\"\" palabra = 'review' diccionario", "'.', None)] diccionario_de_aspectos = {\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado", "warehouse\\\". They can't ship if the items are not in their warehouses, seemly", "an item I bought on AMZ, but cannot ask the question as apparently,", "self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta", "None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]}", "{\"sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"sheep\",\"ultimately\") self.assertEqual(res_esperado, res)", "arbol de dependencias que resuelve el Stanford CoreNLP. Prueba que el método extraer", "of perfectly electric sheep, lately? indice_raiz = 3 indice_nodo = 9 lista_pos_lem =", "en el diccionario de aspectos. Se espera que la palabra 'comment' sea determinado", "\"valid\") \"\"\" indice_raiz = 5 indice_nodo = 4 lista_pos_lem = [('i', 'LS', None),", "('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0,", "as apparently, I am persona non grata these days. 
I got the product", "llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] = dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio()", "= [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),", "'.', None)] indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado,", "dependencias que resuelve el Stanford CoreNLP. Prueba que el método extraer levante una", "cliente_corenlp.resolver_dependencias). \"\"\" com = \"i am a valid comment.\" diccionario = dict() arbol", "'PRP', None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ',", "diccionario_esperado = {\"example\":[\"not\", \"good\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_8(self):", "res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar", "\"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se", "la palabra de una tupla pos_lem dado una posición. Se espera que de", "'NN', None), ('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None), ('always', 'RB',", "self.assertEqual(res_esperado, res) def test_extractor_11(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"Prime", "if the items are not in their warehouses, seemly blaming the vendors. Shame", "the same, wrong item. Debe devolver {\"item\":[\"same\",\"wrong\"]} \"\"\" com = \"They sent him", "diccionario de aspectos. Se espera que la palabra 'review' sea determinado como aspecto", "resultado) def test__amod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando", "una excepcion si no recibe el arbol de aspectos en fora de una", "= {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self): \"\"\" Dado", "to move merchandise from vendors warehousing to Amazon warehouses. They can't ship what's", "4 indice_nodo = 3 lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red',", "example.\" diccionario = {\"example\":[\"example\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)", "do is give me the runaround and tell me their policy has not", "= \"black really cute\" self.assertEqual(res, texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar que es", "cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\", 'RB',", "'JJ', None), ('example', 'NN', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6),", "Amazon employs. Enter employee Ruchitha. 
I was trying to get a print out", "= dict() for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem", "= self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really cute\"]} dic_resultado", "'comment' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_2(self): \"\"\"", "test_extractor_10(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"There was a time", "res_esperado) def test__amod_4(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando", "= {\"comment\":[\"comment\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado =", "a valid comment.\" diccionario = {\"comment\":[\"comment\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "None pos_lem = list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\"", "\"\"\" Prueba el método auxiliar que busca sujetos nominales. Debe encontrar el adjetivo", "to comment on products if they feel you are not paying the top", "que se ejecuta cuando se encuentra una dependencia con la etiqueta \"advmod\". Se", "la palabra 'a', ya que el lema es None. \"\"\" tupla_pos_lem = [('i',", "\"really cute\") res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__nsub_2(self):", "palabra 'comment' sea determinado como aspecto 'comment'. \"\"\" palabra = 'comment' diccionario =", "None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod',", "self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"fan-boy\":[\"super-Amazon\"],", "('cop', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod',", "sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)", "res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"comment\", \"valid\") self.assertEqual(res, res_esperado) def", "are long past. If AMZ is good at one thing these days, it", "= {\"experience\":[\"good\"], \"Amazon\":[], \"item\":[\"same\",\"wrong\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\" Pruebas con comentarios", "people out of their ability to comment on products if they feel you", "('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)] diccionario_de_aspectos", "but those days are long past. If AMZ is good at one thing", "Dado el siguiente comentario: i really love black cats. Debe devolver {\"cats\":[\"black\"} \"\"\"", "fan-boy, but those days are long past. 
If AMZ is good at one", "6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos =", "6, 20), ('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)] diccionario_de_aspectos =", "Prueba el método auxiliar que busca sujetos nominales. Como el sujeto nominas no", "('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6, 3), ('det', 6, 4), ('amod',", "never again\" res = self.ex.quitar_palabras(texto) texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado)", "= self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado =", "el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia'). \"\"\" # do you", "= self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def", "indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En algunas", "\"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"], \"price\":[\"prices\", \"price\"],} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for", "3 lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone',", "texto_esperado) def _combinar_dict(self, dict1, dict2): for llave in dict1: if llave in dict2.keys():", "of perfectly electric sheep, lately?\" diccionario = {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos", "diccionario_esperado = {\"item\":[\"same\",\"wrong\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) print(diccionario_esperado) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self):", "en la posición 3, devuelve la palabra 'a', ya que el lema es", "indice_nodo = 22 res_esperado = (\"Member\", \"no more\") res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem,", "'NN', 'comment'), ('.', '.', None)] indice = 1 resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem) resultado_esperado", "be a thing of the past. I have been a Prime member for", "adverbio, sustantivo o negacion. \"\"\" texto = \"black cats are really cute.\" res", "but no more!! I have had numerous conversations with customer service and supervisors.", "{\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el metodo", "indice_raiz = 5 indice_nodo = 4 lista_pos_lem = [('i', 'LS', None), ('am', 'VBP',", "resultado) def test__es_aspecto_2(self): \"\"\" Prueba el método auxiliar que es usado para determinar", "10)] res_esperado = \"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self):", "and always received my merchandise in the desired time frame, but no more!!", "adjetivo a un sustantivo, debe regresar None. \"\"\" lista_pos_lem = [('this', 'DT', None),", "it is finding new and innovated ways to anger their customers. I try", "{\"cats\":[\"black\"} \"\"\" com = \"i really love black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]}", "siguiente comentario: this is not a good example. 
Debe devolver {\"example\":[\"not good\"]} \"\"\"", "to cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada)", "arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_7(self): \"\"\" Dado el siguiente comentario: this is", "\"\"\" palabra = 'review' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado)", "None)] indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado)", "\"\"\" Pruebas con comentarios reales \"\"\" com = \"There was a time I", "self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"Member\":[\"Prime\",", "res) def test__nsub_2(self): \"\"\" Prueba el método auxiliar que busca sujetos nominales. Como", "self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really cute\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def", "sujetos nominales. Como el sujeto nominas no va de un adjetivo a un", "en un solo string. Se espera (\"sheep\", \"perfectly electric\") \"\"\" # do you", "que se ejecuta cuando se encuentra una dependencia con la etiqueta \"amod\". Se", "[('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod',", "adjetivo 'electric'. Se espera que devuelva 'perfectly'. \"\"\" indice_nodo = 6 lista_pos_lem =", "love black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com)", "espera una tupla (\"cyclone\", \"red\") \"\"\" indice_raiz = 4 indice_nodo = 3 lista_pos_lem", "for services you no longer can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias", "= self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"sheep\":[\"ultimately\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def", "been a Prime member for years and always received my merchandise in the", "products if they feel you are not paying the top price. Today I", "\"perfectly electric lately\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_2(self): \"\"\" Prueba el metodo quitar_palabras. 
Se", "método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la", "texto_esperado = \"black really cute\" self.assertEqual(res, texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar que", "None), ('example', 'NN', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj',", "Como el sujeto nominas no va de un adjetivo a un sustantivo, debe", "Se espera (\"sheep\", \"perfectly electric\") \"\"\" # do you dream of perfectly electric", "[('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6, 3), ('det',", "comentario: ultimately, it's a sheep Debe devolver {\"sheep\":[\"ultimately\"]} \"\"\" com = \"ultimately, it's", "= self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "None), ('more', 'JJR', None), ('!!', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj',", "res) def test__neg_1(self): \"\"\" Prueba el método auxiliar que busca negaciones. Debe encontrar", "= self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self): \"\"\" Dado el siguiente comentario:", "where I can. Apparently, AMZ does not like this and has taken to", "grata these days. I got the product with a discount via research on", "= self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def", "de adverbio, sustantivo o negacion. \"\"\" texto = \"do you dream of perfectly", "'.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado =", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"Usually", "tenga una etiqueta POS de adverbio, sustantivo o negacion. \"\"\" texto = \"black", "diccionario = {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)", "('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5,", "dic_resultado) def test_extractor_9(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"Usually I", "texto_esperado = \"perfectly electric lately\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_2(self): \"\"\" Prueba el metodo", "el lema o la palabra de una tupla pos_lem dado una posición. Se", "devuelve el lema 'be'. \"\"\" tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'),", "devuelva ('cats', \"really cute\"). \"\"\" lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None),", "self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self): \"\"\" Dado el siguiente comentario: im", "2, 1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5,", "country, he called customer service and a rep said they were going to", "savvy. After he had bought a dvd that wasn't playable in the country,", "None)] arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop',", "en fora de una lista (la salida que ofrece cliente_corenlp.resolver_dependencias). 
\"\"\" com =", "método extraer levante una excepcion si no recibe el arbol de aspectos en", "com = \"i am a valid comment.\" diccionario = dict() arbol = None", "sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario,", "= self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)", "= 'review' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self):", "dream of perfectly electric sheep, lately?\" res = self.ex.quitar_palabras(texto) texto_esperado = \"perfectly electric", "'NNS', None), ('are', 'VBP', None), ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.',", "la etiqueta \"amod\". Se espera una tupla (\"cyclone\", \"red\") \"\"\" indice_raiz = 4", "res_esperado = None res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def", "and its customer service reps, but after todays online customer service chat I", "como argumento el arbol de dependencias que resuelve el Stanford CoreNLP. Prueba que", "\"\"\" lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really',", "'JJR', None), ('!!', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1),", "1 res_esperado = None res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res)", "lately? Debe devolver {\"dream\":[\"dream\"],\"sheep\":[\"sheep\"]} \"\"\" com = \"do you dream of perfectly electric", "18), ('punct', 6, 19), ('cc', 6, 20), ('neg', 22, 21), ('conj', 6, 22),", "\"never\"), ('again', 'RB', \"again\")] self.assertEqual(res, tupla_esperada) def test__unir_palabras(self): \"\"\" Método auxiliar que une", "and a rep said they were going to send him the correct one.", "res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"fan-boy\":[\"super-Amazon\"], \"Amazon\":[\"good\"],", "setUp(self): self.ex = extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas = lematizador.Lematizador() def test_extractor_recibe_arbol_de_dependencias(self): \"\"\"", "are really cute.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com)", "lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really', 'RB',", "auxiliar que une las palabras de la lista de tuplas. \"\"\" texto =", "la lista de tuplas. \"\"\" texto = \"don't say no to cookies, never", "texto = \"black cats are really cute.\" res = self.ex.quitar_palabras(texto) texto_esperado = \"black", "espera que devuelva ('cats', \"really cute\"). 
\"\"\" lista_pos_lem = [('black', 'JJ', None), ('cats',", "= 'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el método auxiliar que es", "diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el siguiente comentario: i am", "siguiente comentario: i really love black cats. Debe devolver {\"cats\":[\"black\"} \"\"\" com =", "if they feel you are not paying the top price. Today I had", "a time I was a super-Amazon fan-boy, but those days are long past.", "pos_lem dado una posición. Se espera que de la tupla en la posición", "\"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda palabra que no", "\"do you dream of perfectly electric sheep, lately?\" res = self.ex.quitar_palabras(texto) texto_esperado =", "indice_nodo = 1 res_esperado = None res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)", "best deal with products all the time and use what discounts where I", "self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta", "self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self): \"\"\" Dado el siguiente comentario: do", "\"\"\" Pruebas con comentarios reales \"\"\" com = \"Usually I have good experiences", "que busca dependencias de dependencias. Debe encontrar el advmod del adjetivo 'electric'. Se", "dependencia con la etiqueta \"amod\". Se espera una tupla (\"cyclone\", \"red\") \"\"\" indice_raiz", "bought a dvd that wasn't playable in the country, he called customer service", "{\"Member\":[\"member\"]} indice_raiz = 6 indice_nodo = 22 res_esperado = (\"Member\", \"no more\") res", "dict() for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem =", "'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB',", "lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ',", "= {\"item\":[\"item\", \"items\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado", "res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"sheep\",\"ultimately\") self.assertEqual(res_esperado, res) def test__advmod_2(self):", "('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)] diccionario_de_aspectos = {\"Member\":[\"member\"]} indice_raiz", "devolver el adjetivo 'valid' del aspecto 'comment' \"\"\" com = \"i am a", "{\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_4(self): \"\"\"", "('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12), ('dobj', 11, 13), ('case',", "'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.',", "= {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado", "supervisors. 
All they do is give me the runaround and tell me their", "de tuplas para eliminar las palabras innecesarias. \"\"\" texto = \"don't say no", "excepcion si no recibe el arbol de aspectos en fora de una lista", "método auxiliar que busca sujetos nominales. Como el sujeto nominas no va de", "to anger their customers. I try to find the best deal with products", "sheep Debe devolver {\"sheep\":[\"ultimately\"]} \"\"\" com = \"ultimately, it's a sheep\" diccionario =", "out label for my roommate since he doesn't have Prime and isn't really", "= (\"sheep\",\"ultimately\") self.assertEqual(res_esperado, res) def test__advmod_2(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que", "aspecto 'comment'. \"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario)", "que tiene su propio advmod, se devuelvan ambos en un solo string. Se", "determinado como aspecto y devuelva None. \"\"\" palabra = 'review' diccionario = {\"comment\":[\"comment\"]}", "cannot ask the question as apparently, I am persona non grata these days.", "[('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None), (\"'s\", 'VBZ', None), ('a',", "None), ('are', 'VBP', None), ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.', None)]", "self.assertEqual(res, texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar que es el que recorre las", "comment.\" diccionario = dict() arbol = None pos_lem = list() with self.assertRaises(Exception): self.ex.extraer(com,", "'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] indice =", "None), ('a', 'DT', None), ('sheep', 'NN', None)] diccionario_de_aspectos = {\"sheep\": [\"sheep\"]} res =", "diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\" Prueba el método", "arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6,", "electric lately\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_2(self): \"\"\" Prueba el metodo quitar_palabras. Se espera", "self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_11(self): \"\"\" Pruebas con comentarios", "com = \"Prime 2 day shipping seems to be a thing of the", "do you dream of perfectly electric sheep, lately? indice_raiz = 3 indice_nodo =", "que no tenga una etiqueta POS de adverbio, sustantivo o negacion. \"\"\" texto", "in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res =", "aspecto y devuelva None. \"\"\" palabra = 'review' diccionario = {\"comment\":[\"comment\"]} resultado =", "('nmod', 3, 7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3, 10)] res_esperado", "'NN', None), ('in', 'IN', None), ('the', 'DT', None), ('desired', 'JJ', None), ('time', 'NN',", "really love black cats. Debe devolver {\"cats\":[\"black\"} \"\"\" com = \"i really love", "'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1,", "'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] indice = 1 resultado =", "sustantivo, debe regresar None. \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None),", "their customers. 
I try to find the best deal with products all the", "None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = None", "'VBP', None), ('been', 'VBN', None), ('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN',", "que devuelva ('cats', \"really cute\"). \"\"\" lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS',", "('comment', 'NN', 'comment'), ('.', '.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo,", "{\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias: arbol", "se devuelvan ambos en un solo string. Se espera (\"sheep\", \"perfectly electric\") \"\"\"", "8), ('advmod', 3, 9), ('punct', 3, 10)] diccionario_de_aspectos = {\"Sheep\": [\"sheep\"]} res =", "\"n't no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self, dict1, dict2): for llave in", "i really love black cats. Debe devolver {\"cats\":[\"black\"} \"\"\" com = \"i really", "to cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\",", "Dado el siguiente comentario: this is not a good example. Debe devolver {\"example\":[\"not", "el método auxiliar que busca sujetos nominales. Debe encontrar el adjetivo y adverbio", "6, 5), ('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9), ('advmod', 11,", "diccionario_de_aspectos = {\"Member\":[\"member\"]} indice_raiz = 6 indice_nodo = 22 res_esperado = (\"Member\", \"no", "blaming the vendors. Shame on you Amazon for not telling the truth. To", "self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\", 'RB', \"n't\"),('no', 'DT', None), ('never', 'RB', \"never\"), ('again', 'RB',", "are not in their warehouses, seemly blaming the vendors. Shame on you Amazon", "a feature on an item I bought on AMZ, but cannot ask the", "que devuelva 'perfectly'. \"\"\" indice_nodo = 6 lista_pos_lem = [('do', 'VB', None), ('you',", "'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN',", "4, 3), ('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res =", "self.assertEqual(res,res_esperado) def test__nsub_1(self): \"\"\" Prueba el método auxiliar que busca sujetos nominales. Debe", "lately\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_2(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que", "= self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_4(self): \"\"\" Dado el siguiente comentario:", "received my merchandise in the desired time frame, but no more!! I have", "la negacion del sustantivos 'example'. Se espera que devuelva ('example','not'). 
\"\"\" lista_pos_lem =", "= self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado =", "tupla_pos_lem) resultado_esperado = 'be' self.assertEqual(resultado, resultado_esperado) def test__buscar_en_tupla_pos_lem_2(self): \"\"\" Prueba el método auxiliar", "5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8), ('advmod', 3, 9),", "= 5 indice_nodo = 4 lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'),", "'JJ', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1),", "diccionario_de_aspectos = {\"cats\":[\"cats\"]} indice_raiz = 5 indice_nodo = 2 res_esperado = (\"cats\", \"really", "= {\"cyclone\":[\"cyclone\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado =", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = None self.assertEqual(res, res_esperado) def test__amod_4(self): \"\"\" Prueba", "amod que tiene su propio advmod, se devuelvan ambos en un solo string.", "'JJ', None), ('member', 'NN', None), ('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC',", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really cute\"]} dic_resultado = self.ex.extraer(diccionario,", "el siguiente comentario: They sent him the same, wrong item. Debe devolver {\"item\":[\"same\",\"wrong\"]}", "lately?\" diccionario = {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem =", "he had bought a dvd that wasn't playable in the country, he called", "'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really', 'RB', None), ('cute', 'JJ',", "lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN',", "de aspectos. Se espera que la palabra 'comment' sea determinado como aspecto 'comment'.", "en el diccionario de aspectos. Se espera que la palabra 'review' sea determinado", "el siguiente comentario: i am a valid comment. Debe devolver el adjetivo 'valid'", "\"\"\" tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid',", "{\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba el método", "indice_nodo = 3 lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ',", "= 3 res_esperado = (\"example\", \"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias)", "aspectos. Se espera que la palabra 'review' sea determinado como aspecto 'comment'. \"\"\"", "merchandise from vendors warehousing to Amazon warehouses. They can't ship what's not available.", "the warehouse\\\". They can't ship if the items are not in their warehouses,", "test_extractor_6(self): \"\"\" Dado el siguiente comentario: i really love black cats. Debe devolver", "'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)] arbol_de_dependencias =", "encontrar la negacion del sustantivos 'example'. 
"""Unit tests for extractor_de_aspectos: the aspect/opinion extraction helpers built on the
Stanford CoreNLP client (cliente_corenlp) and the lematizador wrapper."""
import sys
sys.path.append('../../extractor_de_aspectos')
import unittest
from extractor import extractor_de_aspectos
from cliente_corenlp import cliente_corenlp
from lematizador import lematizador
import nltk


class Test(unittest.TestCase):

    def setUp(self):
        self.ex = extractor_de_aspectos.ExtractorDeAspectos()
        self.cliente = cliente_corenlp.ClienteCoreNLP()
        self.lemas = lematizador.Lematizador()

    def test_extractor_recibe_arbol_de_dependencias(self):
        """extraer needs the dependency tree resolved by Stanford CoreNLP; it must raise an exception
        when the tree is not given as a list (the output of cliente_corenlp.resolver_dependencias)."""
        com = "i am a valid comment."
        diccionario = dict()
        arbol = None
        pos_lem = list()
        with self.assertRaises(Exception):
            self.ex.extraer(com, diccionario, arbol, pos_lem)

    def test__buscar_en_tupla_pos_lem(self):
        """The helper that looks up the lemma or the word of a pos_lem tuple at a given position
        should return the lemma 'be' for position 1."""
        tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        indice = 1
        resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem)
        resultado_esperado = 'be'
        self.assertEqual(resultado, resultado_esperado)

    def test__buscar_en_tupla_pos_lem_2(self):
        """For position 3 it should return the word 'a', since that entry's lemma is None."""
        tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        indice = 3
        resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem)
        resultado_esperado = 'a'
        self.assertEqual(resultado, resultado_esperado)

    def test__es_aspecto_1(self):
        """The helper that checks whether a word is in the aspect dictionary should map the word
        'comment' to the aspect 'comment'."""
        palabra = 'comment'
        diccionario = {"comment": ["comment"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual("comment", resultado)

    def test__es_aspecto_2(self):
        """The word 'review' should also be mapped to the aspect 'comment'."""
        palabra = 'review'
        diccionario = {"comment": ["comment", "review"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual("comment", resultado)

    def test__es_aspecto_3(self):
        """The word 'review' is not listed for any aspect, so None is expected."""
        palabra = 'review'
        diccionario = {"comment": ["comment"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual(None, resultado)

    def test__amod_1(self):
        """_extraer_dependencia on an "amod" dependency should return the tuple ("comment", "valid")."""
        indice_raiz = 5
        indice_nodo = 4
        lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        diccionario_de_aspectos = {"comment": ["comment"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("comment", "valid")
        self.assertEqual(res, res_esperado)

    def test__amod_2(self):
        """_extraer_dependencia on an "amod" dependency should return the tuple ("cyclone", "red")."""
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)]
        diccionario_de_aspectos = {"cyclone": ["cyclone"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("cyclone", "red")
        self.assertEqual(res, res_esperado)

    def test__amod_3(self):
        """The noun of the "amod" dependency is not in the aspect dictionary, so None is expected."""
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)]
        diccionario_de_aspectos = {"not": ["ok"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = None
        self.assertEqual(res, res_esperado)

    def test__amod_4(self):
        """Same case as above, but passing the dependency tree explicitly; None is still expected."""
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3), ('dobj', 1, 4), ('punct', 1, 5)]
        diccionario_de_aspectos = {"not": ["ok"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias)
        res_esperado = None
        self.assertEqual(res, res_esperado)

    def test__advmod_1(self):
        """_extraer_dependencia on an "advmod" dependency should return the noun's adverb as a
        ('noun', 'dependency') tuple."""
        # ultimately, it's a sheep
        indice_raiz = 6
        indice_nodo = 1
        lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None), ("'s", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)]
        diccionario_de_aspectos = {"sheep": ["sheep"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("sheep", "ultimately")
        self.assertEqual(res_esperado, res)

    def test__advmod_2(self):
        """Same "advmod" case, here for the noun 'dream'."""
        # do you dream of perfectly electric sheep, lately?
        indice_raiz = 3
        indice_nodo = 9
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)]
        diccionario_de_aspectos = {"Dream": ["dream"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("Dream", "lately")
        self.assertEqual(res_esperado, res)

    def test__amod_advmod(self):
        """Sometimes a noun's adjective carries its own adverb. When an "amod" dependency has its own
        "advmod", both should come back in a single string: ("Sheep", "perfectly electric")."""
        # do you dream of perfectly electric sheep, lately?
        indice_raiz = 7
        indice_nodo = 6
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3, 10)]
        diccionario_de_aspectos = {"Sheep": ["sheep"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias)
        res_esperado = ("Sheep", "perfectly electric")
        self.assertEqual(res_esperado, res)

    def test_extraer_dependencia_doble_1(self):
        """The helper that looks up dependencies of dependencies should find the advmod of the
        adjective 'electric' and return 'perfectly'."""
        indice_nodo = 6
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3, 10)]
        res_esperado = "perfectly"
        res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test__neg_1(self):
        """The helper that looks for negations should return ('example', 'not') for the noun 'example'."""
        lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
        diccionario_de_aspectos = {"example": ["example"]}
        indice_raiz = 6
        indice_nodo = 3
        res_esperado = ("example", "not")
        res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias)
        self.assertEqual(res, res_esperado)

    def test__nsub_1(self):
        """The helper that looks for nominal subjects should find the adjective and the adverb of the
        noun 'cats' and return ('cats', "really cute")."""
        lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)]
        diccionario_de_aspectos = {"cats": ["cats"]}
        indice_raiz = 5
        indice_nodo = 2
        res_esperado = ("cats", "really cute")
        res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test__nsub_2(self):
        """This nominal subject does not go from an adjective to a noun, so it should return None."""
        lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
        diccionario_de_aspectos = {"example": ["example"]}
        indice_raiz = 6
        indice_nodo = 1
        res_esperado = None
        res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test__conj_1(self):
        """Helper that handles conjunctions from a noun to an adverb/adjective."""
        lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None), ('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None), ('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None), ('always', 'RB', None), ('received', 'VBD', None), ('my', 'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None), ('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None), (',', ',', None), ('but', 'CC', None), ('no', 'DT', None), ('more', 'JJR', None), ('!!', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9), ('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12), ('dobj', 11, 13), ('case', 18, 14), ('det', 18, 15), ('amod', 18, 16), ('compound', 18, 17), ('nmod', 11, 18), ('punct', 6, 19), ('cc', 6, 20), ('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)]
        diccionario_de_aspectos = {"Member": ["member"]}
        indice_raiz = 6
        indice_nodo = 22
        res_esperado = ("Member", "no more")
        res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def _extraer_de_comentario(self, com, diccionario):
        """Shared pipeline for the end-to-end extractor tests: resolve dependencies, POS-tag,
        lemmatize, and extract."""
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        return self.ex.extraer(diccionario, arbol, lista_pos_lem)

    def _extraer_de_comentario_multioracion(self, com, diccionario):
        """Sentence-tokenizes a longer review and merges the per-sentence results."""
        dic_resultado = dict()
        for sentencia in nltk.sent_tokenize(com):
            res = self._extraer_de_comentario(sentencia, diccionario)
            dic_resultado = self._combinar_dict(res, dic_resultado)
        return dic_resultado

    def test_extractor_1(self):
        """Comment "i am a valid comment." should yield the adjective 'valid' for the aspect 'comment'."""
        dic_resultado = self._extraer_de_comentario("i am a valid comment.", {"comment": ["comment"]})
        self.assertEqual({"comment": ["valid"]}, dic_resultado)

    def test_extractor_2(self):
        """Comment "im the red cyclone." should yield {"cyclone": ["red"]}."""
        dic_resultado = self._extraer_de_comentario("im the red cyclone.", {"cyclone": ["cyclone"]})
        self.assertEqual({"cyclone": ["red"]}, dic_resultado)

    def test_extractor_3(self):
        """Comment "do you dream of perfectly electric sheep, lately?" should yield
        {"dream": ["lately"], "sheep": ["perfectly electric"]}."""
        dic_resultado = self._extraer_de_comentario("do you dream of perfectly electric sheep, lately?", {"dream": ["dream"], "sheep": ["sheep"]})
        self.assertEqual({"dream": ["lately"], "sheep": ["perfectly electric"]}, dic_resultado)

    def test_extractor_4(self):
        """Comment "ultimately, it's a sheep" should yield {"sheep": ["ultimately"]}."""
        dic_resultado = self._extraer_de_comentario("ultimately, it's a sheep", {"sheep": ["sheep"]})
        self.assertEqual({"sheep": ["ultimately"]}, dic_resultado)

    def test_extractor_5(self):
        """Comment "black cats are really cute." should yield {"cats": ["black", "really cute"]}."""
        dic_resultado = self._extraer_de_comentario("black cats are really cute.", {"cats": ["cat", "cats"]})
        self.assertEqual({"cats": ["black", "really cute"]}, dic_resultado)

    def test_extractor_6(self):
        """Comment "i really love black cats." should yield {"cats": ["black"]}."""
        dic_resultado = self._extraer_de_comentario("i really love black cats.", {"cats": ["cat", "cats"]})
        self.assertEqual({"cats": ["black"]}, dic_resultado)

    def test_extractor_7(self):
        """Comment "this is not a good example." should yield {"example": ["not", "good"]}."""
        dic_resultado = self._extraer_de_comentario("this is not a good example.", {"example": ["example"]})
        self.assertEqual({"example": ["not", "good"]}, dic_resultado)

    def test_extractor_8(self):
        """Comment "They sent him the same, wrong item." should yield {"item": ["same", "wrong"]}."""
        dic_resultado = self._extraer_de_comentario("They sent him the same, wrong item.", {"item": ["item", "items"]})
        self.assertEqual({"item": ["same", "wrong"]}, dic_resultado)

    def test_extractor_9(self):
        """Test with a real review."""
        com = "Usually I have good experiences with Amazon and its customer service reps, but after todays online customer service chat I am horrified at some of the people Amazon employs. Enter employee Ruchitha. I was trying to get a print out label for my roommate since he doesn't have Prime and isn't really internet savvy. After he had bought a dvd that wasn't playable, he called customer service and a rep said they were going to send him the correct one. They sent him the same, wrong item. So he had 2 returns to do."
        diccionario = {"experience": ["experiences", "experience"], "Amazon": ["Amazon", "amazon"], "item": ["item", "items"]}
        dic_resultado = self._extraer_de_comentario_multioracion(com, diccionario)
        self.assertEqual({"experience": ["good"], "Amazon": [], "item": ["same", "wrong"]}, dic_resultado)

    def test_extractor_10(self):
        """Test with a real review."""
        com = "There was a time I was a super-Amazon fan-boy, but those days are long past. If AMZ is good at one thing these days, it is finding new and innovated ways to anger their customers. I try to find the best deal with products all the time and use what discounts where I can. Apparently, AMZ does not like this and has taken to locking people out of their ability to comment on products if they feel you are not paying the top price. Today I had the simplest question about a feature on an item I bought on AMZ, but cannot ask the question as apparently, I am persona non grata these days. I got the product with a discount via research on the net."
        diccionario = {"fan-boy": ["fan-boy"], "Amazon": ["Amazon", "amazon", "AMZ"], "question": ["question"], "thing": ["thing", "things"], "way": ["way", "ways"], "deal": ["deal", "deals"], "price": ["prices", "price"]}
        dic_resultado = self._extraer_de_comentario_multioracion(com, diccionario)
        self.assertEqual({"fan-boy": ["super-Amazon"], "Amazon": ["good"], "question": ["simple"], "thing": ["good"], "way": ["new"], "deal": ["best"], "price": ["top"]}, dic_resultado)

    def test_extractor_11(self):
        """Test with a real review."""
        com = "Prime 2 day shipping seems to be a thing of the past. I have been a Prime member for years and always received my merchandise in the desired time frame, but no more!! I have had numerous conversations with customer service and supervisors. All they do is give me the runaround and tell me their policy has not changed. \"Two day shipping starts when the item leaves the warehouse\". They can't ship if the items are not in their warehouses, seemly blaming the vendors. Shame on you Amazon for not telling the truth. To save money, Amazon no longer uses reliable trucking companies to move merchandise from vendors warehousing to Amazon warehouses. They can't ship what's not available. Nice way to save a buck. But keep taking our membership money for services you no longer can provide."
        diccionario = {"Member": ["member", "Member"], "Shipping": ["shipping", "Shipping"]}
        dic_resultado = self._extraer_de_comentario_multioracion(com, diccionario)
        self.assertEqual({"Member": ["Prime", "no more"], "Shipping": ["day"]}, dic_resultado)

    def test_quitar_palabras(self):
        """quitar_palabras should drop every word whose POS tag is not an adverb, an adjective, or a negation."""
        texto = "do you dream of perfectly electric sheep, lately?"
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "perfectly electric lately"
        self.assertEqual(res, texto_esperado)

    def test_quitar_palabras_2(self):
        texto = "don't say no to cookies, never again"
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "n't no never again"
        self.assertEqual(res, texto_esperado)

    def test_quitar_palabras_3(self):
        texto = "black cats are really cute."
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "black really cute"
        self.assertEqual(res, texto_esperado)

    def test__purgar_palabras_pos(self):
        """Helper that walks the pos_lem tuple list and removes the unneeded words."""
        texto = "don't say no to cookies, never again"
        lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
        res = self.ex._purgar_palabras_pos(lista_pos_lem)
        tupla_esperada = [("n't", 'RB', "n't"), ('no', 'DT', None), ('never', 'RB', "never"), ('again', 'RB', "again")]
        self.assertEqual(res, tupla_esperada)

    def test__unir_palabras(self):
        """Helper that joins the words of the purged tuple list back into a string."""
        texto = "don't say no to cookies, never again"
        lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
        tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem)
        res = self.ex._unir_palabras(tupla_purgada)
        texto_esperado = "n't no never again"
        self.assertEqual(res, texto_esperado)

    def _combinar_dict(self, dict1, dict2):
        for llave in dict1:
            if llave in dict2.keys():
                dict2[llave].extend(dict1[llave])
            else:
                dict2[llave] = dict1[llave]
        return dict2

    def tearDown(self):
        self.cliente.cerrar_servicio()
        self.ex.cerrar()
Se espera que regrese", "\"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el metodo quitar_palabras.", "= 22 res_esperado = (\"Member\", \"no more\") res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,", "self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado = self.ex.extraer(diccionario,", "Dado el siguiente comentario: i am a valid comment. Debe devolver el adjetivo", "10)] diccionario_de_aspectos = {\"Sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado", "primero se necesita pasar como argumento el arbol de dependencias que resuelve el", "None), ('you', 'PRP', None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None),", "{\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = None self.assertEqual(res, res_esperado) def", "telling the truth. To save money, Amazon no longer uses reliable trucking companies", "diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba", "pos_lem = list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba", "I was trying to get a print out label for my roommate since", "el método auxiliar que busca sujetos nominales. Como el sujeto nominas no va", "'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN',", "self.assertEqual(res, texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine", "None), (',', ',', None), ('it', 'PRP', None), (\"'s\", 'VBZ', None), ('a', 'DT', None),", "= (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "1), ('cop', 6, 2), ('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5),", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"There", "items are not in their warehouses, seemly blaming the vendors. Shame on you", "('case', 7, 4), ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct',", "= self.ex._purgar_palabras_pos(lista_pos_lem) tupla_esperada = [(\"n't\", 'RB', \"n't\"),('no', 'DT', None), ('never', 'RB', \"never\"), ('again',", "test__purgar_palabras_pos(self): \"\"\" Método auxiliar que es el que recorre las lista de tuplas", "Se espera que elimine toda palabra que no tenga una etiqueta POS de", "= {\"Sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly", "truth. 
To save money, Amazon no longer uses reliable trucking companies to move", "'VBD', None), ('my', 'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT',", "trying to get a print out label for my roommate since he doesn't", "longer can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com) dic_resultado =", "diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el", "= [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6, 3),", "def test__amod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se", "'VBP', None), ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.', None)] arbol_de_dependencias =", "really cute\" self.assertEqual(res, texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar que es el que", "diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado", "\"\"\" Prueba el método auxiliar que busca negaciones. Debe encontrar la negacion del", "una posición. Se espera que de la tupla en la posición 3, devuelve", "= {\"dream\":[\"dream\"], \"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado", "arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado", "time I was a super-Amazon fan-boy, but those days are long past. If", "years and always received my merchandise in the desired time frame, but no", "texto = \"do you dream of perfectly electric sheep, lately?\" res = self.ex.quitar_palabras(texto)", "\"im the red cyclone.\" diccionario = {\"cyclone\":[\"cyclone\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com)", "del adjetivo 'electric'. Se espera que devuelva 'perfectly'. \"\"\" indice_nodo = 6 lista_pos_lem", "and use what discounts where I can. Apparently, AMZ does not like this", "0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5,", "indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el siguiente comentario:", "in the country, he called customer service and a rep said they were", "Enter employee Ruchitha. I was trying to get a print out label for", "('received', 'VBD', None), ('my', 'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the',", "('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] indice = 1 resultado", "self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self):", "sujetos nominales. Debe encontrar el adjetivo y adverbio del sustantivo 'cats'. 
Se espera", "'.', None)] arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2),", "arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el método auxiliar que es usado para", "super-Amazon fan-boy, but those days are long past. If AMZ is good at", "para manejar las conjunciones de un sustantivo a un adverbio/adjetivo \"\"\" lista_pos_lem =", "'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]", "None), ('!!', '.', None)] arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux',", "se encuentra una dependencia con la etiqueta \"amod\". Se espera una tupla (\"comment\",", "('det', 18, 15), ('amod', 18, 16), ('compound', 18, 17), ('nmod', 11, 18), ('punct',", "el siguiente comentario: im the red cyclone. Debe devolver {\"cyclone\":[\"red\"]} \"\"\" com =", "sustantivo en una tupla: ('sustantivo', 'dependencia'). \"\"\" # do you dream of perfectly", "('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None), ('.', '.', None)] arbol_de_dependencias", "sys sys.path.append('../../extractor_de_aspectos') import unittest from extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from", "2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)] diccionario_de_aspectos = {\"cats\":[\"cats\"]}", "really love black cats.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos =", "comentarios reales \"\"\" com = \"There was a time I was a super-Amazon", "lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En algunas ocaciones,", "el que recorre las lista de tuplas para eliminar las palabras innecesarias. \"\"\"", "a dvd that wasn't playable in the country, he called customer service and", "ask the question as apparently, I am persona non grata these days. I", "una posición. Se espera que de la tupla en la posición 1, devuelve", "= {\"sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"sheep\",\"ultimately\") self.assertEqual(res_esperado,", "8), ('advmod', 3, 9), ('punct', 3, 10)] res_esperado = \"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo,", "= 4 lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None),", "'perfectly'. \"\"\" indice_nodo = 6 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None),", "\"red\") self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se", "espera que de la tupla en la posición 3, devuelve la palabra 'a',", "1, devuelve el lema 'be'. \"\"\" tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP',", "comentario: They sent him the same, wrong item. Debe devolver {\"item\":[\"same\",\"wrong\"]} \"\"\" com", "sustantivo poseen su propio adverbio. Esta prueba espera que al encontrar una dependencia", "item. 
Debe devolver {\"item\":[\"same\",\"wrong\"]} \"\"\" com = \"They sent him the same, wrong", "= 6 indice_nodo = 1 lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None),", "I bought on AMZ, but cannot ask the question as apparently, I am", "dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"experience\":[\"good\"], \"Amazon\":[], \"item\":[\"same\",\"wrong\"] } self.assertEqual(diccionario_esperado, dic_resultado) def", "\"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) self.assertEqual(res,res_esperado) def test__nsub_1(self): \"\"\" Prueba", "cookies, never again\" res = self.ex.quitar_palabras(texto) texto_esperado = \"n't no never again\" self.assertEqual(res,", "= {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba el", "palabra 'review' no sea determinado como aspecto y devuelva None. \"\"\" palabra =", "(\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que", "self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el siguiente", "cute.\" diccionario = {\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem =", "sys.path.append('../../extractor_de_aspectos') import unittest from extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador", "service and a rep said they were going to send him the correct", "diccionario_de_aspectos) res_esperado = (\"sheep\",\"ultimately\") self.assertEqual(res_esperado, res) def test__advmod_2(self): \"\"\" Prueba el método auxiliar", "was a time I was a super-Amazon fan-boy, but those days are long", "los aspectos, primero se necesita pasar como argumento el arbol de dependencias que", "had numerous conversations with customer service and supervisors. All they do is give", "= \"ultimately, it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos =", "in the desired time frame, but no more!! I have had numerous conversations", "3, 8), ('advmod', 3, 9), ('punct', 3, 10)] res_esperado = \"perfectly\" res =", "= [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3),", "None), ('have', 'VBP', None), ('been', 'VBN', None), ('a', 'DT', None), ('Prime', 'JJ', None),", "is not a good example. 
Debe devolver {\"example\":[\"not good\"]} \"\"\" com = \"this", "you no longer can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com)", "('?', '.', None)] diccionario_de_aspectos = {\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"sheep\",\"ultimately\") self.assertEqual(res_esperado, res) def test__advmod_2(self): \"\"\" Prueba", "com = \"There was a time I was a super-Amazon fan-boy, but those", "('cc', 6, 20), ('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)] diccionario_de_aspectos", "regrese el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia'). \"\"\" # ultimately,", "de una lista (la salida que ofrece cliente_corenlp.resolver_dependencias). \"\"\" com = \"i am", "palabras innecesarias. \"\"\" texto = \"don't say no to cookies, never again\" lista_pos_lem", "self.assertEqual(res, res_esperado) def test__amod_5(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta", "a sheep indice_raiz = 6 indice_nodo = 1 lista_pos_lem = [('ultimately', 'RB', None),", "6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6, 3), ('det', 6, 4),", "buck. But keep taking our membership money for services you no longer can", "\"item\":[\"item\",\"items\"]} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias: arbol =", "de aspectos en fora de una lista (la salida que ofrece cliente_corenlp.resolver_dependencias). \"\"\"", "\"\"\" Dado el siguiente comentario: ultimately, it's a sheep Debe devolver {\"sheep\":[\"ultimately\"]} \"\"\"", "I am horrified at some of the people Amazon employs. Enter employee Ruchitha.", "conjunciones de un sustantivo a un adverbio/adjetivo \"\"\" lista_pos_lem = [('I', 'PRP', None),", "('sheep', 'NN', None)] diccionario_de_aspectos = {\"sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)", "5 indice_nodo = 4 lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a',", "# ultimately, it's a sheep indice_raiz = 6 indice_nodo = 1 lista_pos_lem =", "'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem,", "23)] diccionario_de_aspectos = {\"Member\":[\"member\"]} indice_raiz = 6 indice_nodo = 22 res_esperado = (\"Member\",", "= \"There was a time I was a super-Amazon fan-boy, but those days", "def test_quitar_palabras_3(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda palabra", "the vendors. Shame on you Amazon for not telling the truth. To save", "(\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que busca", "\"amod\". Se espera una tupla (\"cyclone\", \"red\") \"\"\" indice_raiz = 4 indice_nodo =", "devolver {\"item\":[\"same\",\"wrong\"]} \"\"\" com = \"They sent him the same, wrong item.\" diccionario", "res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self,", "\"\"\" Dado el siguiente comentario: i am a valid comment. 
Debe devolver el", "resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_3(self): \"\"\" Prueba el método auxiliar", "have been a Prime member for years and always received my merchandise in", "= dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar() if __name__ == \"__main__\": #import", "lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba el método auxiliar que busca", "'IN', None), ('years', 'NNS', None), ('and', 'CC', None), ('always', 'RB', None), ('received', 'VBD',", "llave in dict1: if llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] = dict1[llave] return", "comentarios reales \"\"\" com = \"Prime 2 day shipping seems to be a", "the people Amazon employs. Enter employee Ruchitha. I was trying to get a", "with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el método auxiliar", "texto_esperado = \"n't no never again\" self.assertEqual(res, texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba el", "= {\"cats\":[\"cats\"]} indice_raiz = 5 indice_nodo = 2 res_esperado = (\"cats\", \"really cute\")", "services you no longer can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias =", "6, 2), ('cop', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('case', 8,", "\"question\":[\"simple\"], \"thing\":[\"good\"], \"way\":[\"new\"], \"deal\":[\"best\"], \"price\":[\"top\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test__conj_1(self): \"\"\" Método aúxiliar", "{\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_7(self): \"\"\" Dado el", "etiqueta POS de adverbio, sustantivo o negacion. \"\"\" texto = \"don't say no", "('never', 'RB', \"never\"), ('again', 'RB', \"again\")] self.assertEqual(res, tupla_esperada) def test__unir_palabras(self): \"\"\" Método auxiliar", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_5(self): \"\"\" Dado el siguiente comentario: black cats are really", "None self.assertEqual(res, res_esperado) def test__amod_5(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se", "devolver {\"sheep\":[\"ultimately\"]} \"\"\" com = \"ultimately, it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol", "good at one thing these days, it is finding new and innovated ways", "diccionario_de_aspectos = {\"Sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado =", "= list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem) def test__buscar_en_tupla_pos_lem(self): \"\"\" Prueba el", "electric\") \"\"\" # do you dream of perfectly electric sheep, lately? indice_raiz =", "3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6, 5), ('amod', 7,", "método auxiliar que busca negaciones. Debe encontrar la negacion del sustantivos 'example'. Se", "self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba el metodo quitar_palabras. 
Se espera que elimine", "un sustantivo a un adverbio/adjetivo \"\"\" lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP',", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self): \"\"\" Dado el siguiente", "service and supervisors. All they do is give me the runaround and tell", "sujeto nominas no va de un adjetivo a un sustantivo, debe regresar None.", "dic_resultado) diccionario_esperado = {\"experience\":[\"good\"], \"Amazon\":[], \"item\":[\"same\",\"wrong\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\" Pruebas", "y devuelva None. \"\"\" palabra = 'review' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra,", "seemly blaming the vendors. Shame on you Amazon for not telling the truth.", "is finding new and innovated ways to anger their customers. I try to", "('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6, 5), ('amod',", "'NN', None)] diccionario_de_aspectos = {\"sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado", "busca dependencias de dependencias. Debe encontrar el advmod del adjetivo 'electric'. Se espera", "res_esperado = (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En algunas ocaciones, adjetivos de", "Debe devolver {\"cats\":[\"black\"} \"\"\" com = \"i really love black cats.\" diccionario =", "('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res", "use what discounts where I can. Apparently, AMZ does not like this and", "solo string. Se espera (\"sheep\", \"perfectly electric\") \"\"\" # do you dream of", "6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6 indice_nodo = 3 res_esperado", "\"this is not a good example.\" diccionario = {\"example\":[\"example\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos", "def test_quitar_palabras_2(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda palabra", "adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia'). \"\"\" # ultimately, it's a", "('lately', 'RB', None), ('?', '.', None)] diccionario_de_aspectos = {\"Dream\": [\"dream\"]} res = self.ex._extraer_dependencia(indice_raiz,", "= [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None), ('really', 'RB', None),", "self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_8(self): \"\"\" Dado el siguiente comentario: They", "una etiqueta POS de adverbio, sustantivo o negacion. \"\"\" texto = \"black cats", "poder extraer los aspectos, primero se necesita pasar como argumento el arbol de", "= self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"fan-boy\":[\"super-Amazon\"], \"Amazon\":[\"good\"], \"question\":[\"simple\"],", "can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com) dic_resultado = dict()", "extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador import lematizador import nltk", "past. 
If AMZ is good at one thing these days, it is finding", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"sheep\":[\"ultimately\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__nsub_2(self): \"\"\" Prueba el método", "Debe devolver el adjetivo 'valid' del aspecto 'comment' \"\"\" com = \"i am", "{\"sheep\":[\"ultimately\"]} \"\"\" com = \"ultimately, it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol =", "de dependencias. Debe encontrar el advmod del adjetivo 'electric'. Se espera que devuelva", "palabra de una tupla pos_lem dado una posición. Se espera que de la", "adverbio, sustantivo o negacion. \"\"\" texto = \"do you dream of perfectly electric", "('but', 'CC', None), ('no', 'DT', None), ('more', 'JJR', None), ('!!', '.', None)] arbol_de_dependencias", "= {\"comment\":[\"comment\", \"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_3(self): \"\"\" Prueba", "\"items\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"item\":[\"same\",\"wrong\"]}", "to send him the correct one. They sent him the same, wrong item.", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_8(self): \"\"\" Dado el siguiente", "palabra que no tenga una etiqueta POS de adverbio, sustantivo o negacion. \"\"\"", "res_esperado = None self.assertEqual(res, res_esperado) def test__amod_5(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "frame, but no more!! I have had numerous conversations with customer service and", "Método auxiliar que es el que recorre las lista de tuplas para eliminar", "\"red\") \"\"\" indice_raiz = 4 indice_nodo = 3 lista_pos_lem = [('im', 'VB', None),", "def test_extractor_11(self): \"\"\" Pruebas con comentarios reales \"\"\" com = \"Prime 2 day", "'comment'), ('.', '.', None)] indice = 1 resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem) resultado_esperado =", "= self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_3(self): \"\"\" Prueba el método auxiliar que", "recibe el arbol de aspectos en fora de una lista (la salida que", "None), ('it', 'PRP', None), (\"'s\", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)]", "their policy has not changed. \\\"Two day shipping starts when the item leaves", "Amazon and its customer service reps, but after todays online customer service chat", "dic_resultado = dict() for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia)", "[\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado,", "indice_nodo = 1 lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP',", "eliminar las palabras innecesarias. 
\"\"\" texto = \"don't say no to cookies, never", "lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"sheep\":[\"ultimately\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado)", "propio advmod, se devuelvan ambos en un solo string. Se espera (\"sheep\", \"perfectly", "Pruebas con comentarios reales \"\"\" com = \"Prime 2 day shipping seems to", "= self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado el", "el método auxiliar que busca negaciones. Debe encontrar la negacion del sustantivos 'example'.", "\"\"\" texto = \"don't say no to cookies, never again\" res = self.ex.quitar_palabras(texto)", "encuentra una dependencia con la etiqueta \"amod\". Se espera una tupla (\"cyclone\", \"red\")", "5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)] diccionario_de_aspectos =", "self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado)", "('cute', 'JJ', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2,", "to locking people out of their ability to comment on products if they", "Pruebas con comentarios reales \"\"\" com = \"Usually I have good experiences with", "('in', 'IN', None), ('the', 'DT', None), ('desired', 'JJ', None), ('time', 'NN', None), ('frame',", "extraer los aspectos, primero se necesita pasar como argumento el arbol de dependencias", "\"advmod\". Se espera que regrese el adverbio del sustantivo en una tupla: ('sustantivo',", "adjetivo 'valid' del aspecto 'comment' \"\"\" com = \"i am a valid comment.\"", "self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"],", "_extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta \"advmod\".", "wrong item. So he had 2 returns to do.\" diccionario = {\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]}", "('years', 'NNS', None), ('and', 'CC', None), ('always', 'RB', None), ('received', 'VBD', None), ('my',", "\"Amazon\":[], \"item\":[\"same\",\"wrong\"] } self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_10(self): \"\"\" Pruebas con comentarios reales \"\"\"", "posición. Se espera que de la tupla en la posición 3, devuelve la", "texto_esperado) def test__purgar_palabras_pos(self): \"\"\" Método auxiliar que es el que recorre las lista", "None. \"\"\" tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None),", "ultimately, it's a sheep indice_raiz = 6 indice_nodo = 1 lista_pos_lem = [('ultimately',", "= self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_8(self): \"\"\" Dado el siguiente comentario:", "res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que busca dependencias de dependencias.", "comment on products if they feel you are not paying the top price.", "and supervisors. All they do is give me the runaround and tell me", "'review' sea determinado como aspecto 'comment'. 
\"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\",", "que el método extraer levante una excepcion si no recibe el arbol de", "= self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el", "net.\" diccionario = {\"fan-boy\":[\"fan-boy\"],\"Amazon\":[\"Amazon\",\"amazon\",\"AMZ\"], \"question\":[\"question\"], \"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"], \"price\":[\"prices\", \"price\"],} sentencias =", "4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9),", "\"\"\" Dado el siguiente comentario: i really love black cats. Debe devolver {\"cats\":[\"black\"}", "('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6", "None), ('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None), (',', ',', None),", "\"\"\" # do you dream of perfectly electric sheep, lately? indice_raiz = 7", "texto_esperado) def test_quitar_palabras_3(self): \"\"\" Prueba el metodo quitar_palabras. Se espera que elimine toda", "el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con", "7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3, 10)] res_esperado = \"perfectly\"", "arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6,", "= \"Prime 2 day shipping seems to be a thing of the past.", "electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que busca dependencias", "AMZ does not like this and has taken to locking people out of", "# do you dream of perfectly electric sheep, lately? indice_raiz = 3 indice_nodo", "\"don't say no to cookies, never again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) res = self.ex._purgar_palabras_pos(lista_pos_lem)", "CoreNLP. Prueba que el método extraer levante una excepcion si no recibe el", "ejecuta cuando se encuentra una dependencia con la etiqueta \"amod\". Se espera None", "with Amazon and its customer service reps, but after todays online customer service", "\"\"\" Prueba el método auxiliar que busca dependencias de dependencias. Debe encontrar el", "= 5 indice_nodo = 2 res_esperado = (\"cats\", \"really cute\") res = self.ex._extraer_nsubj(indice_raiz,", "vendors warehousing to Amazon warehouses. They can't ship what's not available. Nice way", "string. Se espera (\"sheep\", \"perfectly electric\") \"\"\" # do you dream of perfectly", "'cats'. Se espera que devuelva ('cats', \"really cute\"). 
\"\"\" lista_pos_lem = [('black', 'JJ',", "res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_1(self): \"\"\" Dado", "8, 7), ('nmod', 6, 8), ('cc', 6, 9), ('advmod', 11, 10), ('conj', 6,", "indice_nodo = 9 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN',", "{\"cats\":[\"black\",\"really cute\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_6(self): \"\"\" Dado", "usado para buscar el lema o la palabra de una tupla pos_lem dado", "def test_extractor_7(self): \"\"\" Dado el siguiente comentario: this is not a good example.", "res_esperado) def test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando", "auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta", "None), ('the', 'DT', None), ('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None),", "propio adverbio. Esta prueba espera que al encontrar una dependencia amod que tiene", "= self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self):", "lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None), ('a', 'DT',", "= (\"Dream\",\"lately\") self.assertEqual(res_esperado, res) def test__amod_advmod(self): \"\"\" En algunas ocaciones, adjetivos de un", "\"\"\" Pruebas con comentarios reales \"\"\" com = \"Prime 2 day shipping seems", "def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que busca dependencias de dependencias. Debe", "11), ('nmod:poss', 13, 12), ('dobj', 11, 13), ('case', 18, 14), ('det', 18, 15),", "again\" lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't", "{\"item\":[\"item\", \"items\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado =", "diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"comment\", \"valid\")", "el método extraer levante una excepcion si no recibe el arbol de aspectos", "dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar() if __name__ == \"__main__\": #import sys;sys.argv", "the red cyclone.\" diccionario = {\"cyclone\":[\"cyclone\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "those days are long past. If AMZ is good at one thing these", "self.assertEqual(res_esperado, res) def test__neg_1(self): \"\"\" Prueba el método auxiliar que busca negaciones. Debe", "negacion. \"\"\" texto = \"don't say no to cookies, never again\" res =", "7, 6), ('nmod', 3, 7), ('punct', 3, 8), ('advmod', 3, 9), ('punct', 3,", "do you dream of perfectly electric sheep, lately? 
indice_raiz = 7 indice_nodo =", "'dependencia'). \"\"\" # do you dream of perfectly electric sheep, lately? indice_raiz =", "dict() arbol = None pos_lem = list() with self.assertRaises(Exception): self.ex.extraer(com, diccionario, arbol, pos_lem)", "'review' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\"", "una dependencia con la etiqueta \"amod\". Se espera una tupla (\"comment\", \"valid\") \"\"\"", "tell me their policy has not changed. \\\"Two day shipping starts when the", "\"\"\" indice_raiz = 4 indice_nodo = 3 lista_pos_lem = [('im', 'VB', None), ('the',", "lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto)) tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem) res = self.ex._unir_palabras(tupla_purgada) texto_esperado = \"n't no", "('the', 'DT', None), ('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None), (',',", "self.ex.extraer(diccionario, arbol, lista_pos_lem) print(diccionario_esperado) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self): \"\"\" Pruebas con comentarios reales", "= self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario,", "cliente_corenlp import cliente_corenlp from lematizador import lematizador import nltk class Test(unittest.TestCase): def setUp(self):", "resuelve el Stanford CoreNLP. Prueba que el método extraer levante una excepcion si", "res_esperado = (\"comment\", \"valid\") self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\" Prueba el método auxiliar", "('.', '.', None)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado", "que regrese el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia'). \"\"\" #", "= self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\"]} dic_resultado =", "= (\"example\", \"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) self.assertEqual(res,res_esperado) def test__nsub_1(self):", "= self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def", "\"\"\" Método aúxiliar para manejar las conjunciones de un sustantivo a un adverbio/adjetivo", "= \"n't no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self, dict1, dict2): for llave", "= 2 res_esperado = (\"cats\", \"really cute\") res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,", "new and innovated ways to anger their customers. I try to find the", "lately? 
indice_raiz = 7 indice_nodo = 6 lista_pos_lem = [('do', 'VB', None), ('you',", "(\"example\", \"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo, lista_pos_lem=lista_pos_lem, diccionario_de_aspectos=diccionario_de_aspectos, arbol_de_dependencias=arbol_de_dependencias) self.assertEqual(res,res_esperado) def test__nsub_1(self): \"\"\"", "arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really cute\"]}", "arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5,", "= self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res,", "None), ('dream', 'NN', None), ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),", "Prueba que el método extraer levante una excepcion si no recibe el arbol", "'DT', None), ('good', 'JJ', None), ('example', 'NN', None), ('.', '.', None)] arbol_de_dependencias =", "def test__neg_1(self): \"\"\" Prueba el método auxiliar que busca negaciones. Debe encontrar la", "no never again\" self.assertEqual(res, texto_esperado) def _combinar_dict(self, dict1, dict2): for llave in dict1:", "espera que devuelva ('example','not'). \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None),", "'JJ', None), ('time', 'NN', None), ('frame', 'NN', None), (',', ',', None), ('but', 'CC',", "{\"cyclone\":[\"red\"]} \"\"\" com = \"im the red cyclone.\" diccionario = {\"cyclone\":[\"cyclone\"]} arbol =", "{\"cats\":[\"cat\", \"cats\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado =", "= \"black cats are really cute.\" res = self.ex.quitar_palabras(texto) texto_esperado = \"black really", "20), ('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)] diccionario_de_aspectos = {\"Member\":[\"member\"]}", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__amod_3(self): \"\"\"", "Dado el siguiente comentario: im the red cyclone. Debe devolver {\"cyclone\":[\"red\"]} \"\"\" com", "horrified at some of the people Amazon employs. Enter employee Ruchitha. I was", "taken to locking people out of their ability to comment on products if", "can. Apparently, AMZ does not like this and has taken to locking people", "1 resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem) resultado_esperado = 'be' self.assertEqual(resultado, resultado_esperado) def test__buscar_en_tupla_pos_lem_2(self): \"\"\"", "dic_resultado) diccionario_esperado = {\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\"", "de aspectos. Se espera que la palabra 'review' no sea determinado como aspecto", "lately? 
indice_raiz = 3 indice_nodo = 9 lista_pos_lem = [('do', 'VB', None), ('you',", "auxiliar que es usado para buscar el lema o la palabra de una", "test_extractor_4(self): \"\"\" Dado el siguiente comentario: ultimately, it's a sheep Debe devolver {\"sheep\":[\"ultimately\"]}", "test__amod_4(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra", "siguiente comentario: do you dream of perfectly electric sheep, lately? Debe devolver {\"dream\":[\"dream\"],\"sheep\":[\"sheep\"]}", "online customer service chat I am horrified at some of the people Amazon", "def test__nsub_2(self): \"\"\" Prueba el método auxiliar que busca sujetos nominales. Como el", "POS de adverbio, sustantivo o negacion. \"\"\" texto = \"do you dream of", "playable in the country, he called customer service and a rep said they", "{\"item\":[\"same\",\"wrong\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) print(diccionario_esperado) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_9(self): \"\"\" Pruebas", "a good example.\" diccionario = {\"example\":[\"example\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)] arbol_de_dependencias", "de aspectos. Se espera que la palabra 'review' sea determinado como aspecto 'comment'.", "self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta", "su propio adverbio. Esta prueba espera que al encontrar una dependencia amod que", "nltk class Test(unittest.TestCase): def setUp(self): self.ex = extractor_de_aspectos.ExtractorDeAspectos() self.cliente = cliente_corenlp.ClienteCoreNLP() self.lemas =", "con comentarios reales \"\"\" com = \"There was a time I was a", "\"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"], \"price\":[\"prices\", \"price\"],} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia", "'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.', None)] diccionario_de_aspectos =", "[\"example\"]} indice_raiz = 6 indice_nodo = 1 res_esperado = None res = self.ex._extraer_nsubj(indice_raiz,", "nominales. Como el sujeto nominas no va de un adjetivo a un sustantivo,", "dream of perfectly electric sheep, lately? 
Debe devolver {\"dream\":[\"dream\"],\"sheep\":[\"sheep\"]} \"\"\" com = \"do", "('punct', 3, 10)] diccionario_de_aspectos = {\"Sheep\": [\"sheep\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,", "of their ability to comment on products if they feel you are not", "dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_7(self): \"\"\" Dado el siguiente", "that wasn't playable in the country, he called customer service and a rep", "wasn't playable in the country, he called customer service and a rep said", "= self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado,", "(\"comment\", \"valid\") \"\"\" indice_raiz = 5 indice_nodo = 4 lista_pos_lem = [('i', 'LS',", "def test_extractor_8(self): \"\"\" Dado el siguiente comentario: They sent him the same, wrong", "After he had bought a dvd that wasn't playable in the country, he", "usado para determinar si una palabra esta en el diccionario de aspectos. Se", "('Prime', 'JJ', None), ('member', 'NN', None), ('for', 'IN', None), ('years', 'NNS', None), ('and',", "warehouses. They can't ship what's not available. Nice way to save a buck.", "('.', '.', None)] indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a'", "aspectos en fora de una lista (la salida que ofrece cliente_corenlp.resolver_dependencias). \"\"\" com", "\"really cute\"). \"\"\" lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP',", "no longer can provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com) dic_resultado", "('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None), ('always', 'RB', None), ('received',", "test_extractor_recibe_arbol_de_dependencias(self): \"\"\" Para poder extraer los aspectos, primero se necesita pasar como argumento", "in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] = dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar()", "self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self):", "= [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),", "'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 1),", "palabra 'review' sea determinado como aspecto 'comment'. \"\"\" palabra = 'comment' diccionario =", "indice_nodo = 4 lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT',", "'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] diccionario_de_aspectos =", "was a super-Amazon fan-boy, but those days are long past. 
"""Unit tests for the aspect extractor (extractor_de_aspectos): given an aspect
dictionary, a CoreNLP dependency tree and a list of (word, POS tag, lemma)
tuples, the extractor returns the opinion words found for each aspect."""
import unittest

from extractor_de_aspectos import extractor_de_aspectos
from cliente_corenlp import cliente_corenlp
from lematizador import lematizador
import nltk


class Test(unittest.TestCase):

    def setUp(self):
        self.ex = extractor_de_aspectos.ExtractorDeAspectos()
        self.cliente = cliente_corenlp.ClienteCoreNLP()
        self.lemas = lematizador.Lematizador()

    def test_extractor_recibe_arbol_de_dependencias(self):
        """ To extract aspects, the dependency tree resolved by the CoreNLP client
        must first be passed as an argument. An exception is expected when the tree
        is not received as a list (the output of cliente_corenlp.resolver_dependencias). """
        com = "i am a valid comment."
        diccionario = dict()
        arbol = None
        pos_lem = list()
        with self.assertRaises(Exception):
            self.ex.extraer(com, diccionario, arbol, pos_lem)

    def test__buscar_en_tupla_pos_lem(self):
        """ Tests the helper used to look up the lemma or the word of a pos_lem
        tuple at a given position. For position 1 it should return the lemma 'be'. """
        tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None),
                         ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        indice = 1
        resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem)
        resultado_esperado = 'be'
        self.assertEqual(resultado, resultado_esperado)

    def test__buscar_en_tupla_pos_lem_2(self):
        """ Tests the helper used to look up the lemma or the word of a pos_lem
        tuple at a given position. For position 3 it should return the word 'a',
        since the lemma is None. """
        tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None),
                         ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        indice = 3
        resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem)
        resultado_esperado = 'a'
        self.assertEqual(resultado, resultado_esperado)

    def test__es_aspecto_1(self):
        """ Tests the helper used to decide whether a word is in the aspect
        dictionary. The word 'comment' should be identified as the aspect 'comment'. """
        palabra = 'comment'
        diccionario = {"comment": ["comment"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual("comment", resultado)

    def test__es_aspecto_2(self):
        """ Tests the helper used to decide whether a word is in the aspect
        dictionary. The word 'review' should be identified as the aspect 'comment'. """
        palabra = 'review'
        diccionario = {"comment": ["comment", "review"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual("comment", resultado)

    def test__es_aspecto_3(self):
        """ Tests the helper used to decide whether a word is in the aspect
        dictionary. The word 'review' should not be identified as an aspect, so
        None should be returned. """
        palabra = 'review'
        diccionario = {"comment": ["comment"]}
        resultado = self.ex._es_aspecto(palabra, diccionario)
        self.assertEqual(None, resultado)

    def test__amod_1(self):
        """ Tests the _extraer_dependencia helper that runs when an "amod"
        dependency is found. A ("comment", "valid") tuple is expected. """
        indice_raiz = 5
        indice_nodo = 4
        lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None),
                         ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)]
        diccionario_de_aspectos = {"comment": ["comment"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("comment", "valid")
        self.assertEqual(res, res_esperado)

    def test__amod_2(self):
        """ Tests the _extraer_dependencia helper that runs when an "amod"
        dependency is found. A ("cyclone", "red") tuple is expected. """
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
                         ('cyclone', 'NN', None), ('.', '.', None)]
        diccionario_de_aspectos = {"cyclone": ["cyclone"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("cyclone", "red")
        self.assertEqual(res, res_esperado)

    def test__amod_3(self):
        """ Tests the _extraer_dependencia helper that runs when an "amod"
        dependency is found. None is expected, because the noun is not an aspect. """
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
                         ('cyclone', 'NN', None), ('.', '.', None)]
        diccionario_de_aspectos = {"not": ["ok"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = None
        self.assertEqual(res, res_esperado)

    def test__amod_4(self):
        """ Tests the _extraer_dependencia helper that runs when an "amod"
        dependency is found and the dependency tree is also provided. None is expected. """
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
                         ('cyclone', 'NN', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
                                 ('dobj', 1, 4), ('punct', 1, 5)]
        diccionario_de_aspectos = {"not": ["ok"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem,
                                           diccionario_de_aspectos,
                                           arbol_de_dependencias=arbol_de_dependencias)
        res_esperado = None
        self.assertEqual(res, res_esperado)

    def test__amod_5(self):
        """ Tests the _extraer_dependencia helper that runs when an "amod"
        dependency is found. A ("cyclone", "red") tuple is expected. """
        indice_raiz = 4
        indice_nodo = 3
        lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
                         ('cyclone', 'NN', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
                                 ('dobj', 1, 4), ('punct', 1, 5)]
        diccionario_de_aspectos = {"cyclone": ["cyclone"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("cyclone", "red")
        self.assertEqual(res, res_esperado)

    def test__advmod_1(self):
        """ Tests the _extraer_dependencia helper that runs when an "advmod"
        dependency is found. It should return the noun's adverb as a tuple:
        ('noun', 'dependency'). """
        # ultimately, it's a sheep
        indice_raiz = 6
        indice_nodo = 1
        lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None),
                         ("'s", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)]
        diccionario_de_aspectos = {"sheep": ["sheep"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("sheep", "ultimately")
        self.assertEqual(res_esperado, res)

    def test__advmod_2(self):
        """ Tests the _extraer_dependencia helper that runs when an "advmod"
        dependency is found. It should return the noun's adverb as a tuple:
        ('noun', 'dependency'). """
        # do you dream of perfectly electric sheep, lately?
        indice_raiz = 3
        indice_nodo = 9
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
                         ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
                         ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
                         ('?', '.', None)]
        diccionario_de_aspectos = {"Dream": ["dream"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
        res_esperado = ("Dream", "lately")
        self.assertEqual(res_esperado, res)

    def test__amod_advmod(self):
        """ Sometimes the adjectives of a noun have their own adverb. When an amod
        dependency has its own advmod, both should be returned in a single string.
        ("Sheep", "perfectly electric") is expected. """
        # do you dream of perfectly electric sheep, lately?
        indice_raiz = 7
        indice_nodo = 6
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
                         ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
                         ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
                         ('?', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
                                 ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
                                 ('advmod', 3, 9), ('punct', 3, 10)]
        diccionario_de_aspectos = {"Sheep": ["sheep"]}
        res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem,
                                           diccionario_de_aspectos,
                                           arbol_de_dependencias=arbol_de_dependencias)
        res_esperado = ("Sheep", "perfectly electric")
        self.assertEqual(res_esperado, res)

    def test_extraer_dependencia_doble_1(self):
        """ Tests the helper that looks for dependencies of dependencies. It should
        find the advmod of the adjective 'electric' and return 'perfectly'. """
        indice_nodo = 6
        lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
                         ('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
                         ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
                         ('?', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
                                 ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
                                 ('advmod', 3, 9), ('punct', 3, 10)]
        res_esperado = "perfectly"
        res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test__neg_1(self):
        """ Tests the helper that looks for negations. It should find the negation
        of the noun 'example' and return ('example', 'not'). """
        lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
                         ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
                         ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6, 3),
                                 ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
        diccionario_de_aspectos = {"example": ["example"]}
        indice_raiz = 6
        indice_nodo = 3
        res_esperado = ("example", "not")
        res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo,
                                           lista_pos_lem=lista_pos_lem,
                                           diccionario_de_aspectos=diccionario_de_aspectos,
                                           arbol_de_dependencias=arbol_de_dependencias)
        self.assertEqual(res, res_esperado)

    def test__nsub_1(self):
        """ Tests the helper that looks for nominal subjects. It should find the
        adjective and adverb of the noun 'cats' and return ('cats', "really cute"). """
        lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None),
                         ('really', 'RB', None), ('cute', 'JJ', None), ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3),
                                 ('advmod', 5, 4), ('punct', 5, 6)]
        diccionario_de_aspectos = {"cats": ["cats"]}
        indice_raiz = 5
        indice_nodo = 2
        res_esperado = ("cats", "really cute")
        res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem,
                                     diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test__nsub_2(self):
        """ Tests the helper that looks for nominal subjects. Since the nominal
        subject does not go from an adjective to a noun, it should return None. """
        lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
                         ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
                         ('.', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2), ('neg', 6, 3),
                                 ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
        diccionario_de_aspectos = {"example": ["example"]}
        indice_raiz = 6
        indice_nodo = 1
        res_esperado = None
        res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem,
                                     diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test_extractor_1(self):
        """ Given the comment: i am a valid comment. It should return the adjective
        'valid' for the aspect 'comment'. """
        com = "i am a valid comment."
        diccionario = {"comment": ["comment"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"comment": ["valid"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_2(self):
        """ Given the comment: im the red cyclone. It should return {"cyclone": ["red"]}. """
        com = "im the red cyclone."
        diccionario = {"cyclone": ["cyclone"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"cyclone": ["red"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_3(self):
        """ Given the comment: do you dream of perfectly electric sheep, lately?
        It should return {"dream": ["lately"], "sheep": ["perfectly electric"]}. """
        com = "do you dream of perfectly electric sheep, lately?"
        diccionario = {"dream": ["dream"], "sheep": ["sheep"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"dream": ["lately"], "sheep": ["perfectly electric"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_4(self):
        """ Given the comment: ultimately, it's a sheep. It should return {"sheep": ["ultimately"]}. """
        com = "ultimately, it's a sheep"
        diccionario = {"sheep": ["sheep"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"sheep": ["ultimately"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_5(self):
        """ Given the comment: black cats are really cute. It should return
        {"cats": ["black", "really cute"]}. """
        com = "black cats are really cute."
        diccionario = {"cats": ["cat", "cats"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"cats": ["black", "really cute"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_6(self):
        """ Given the comment: i really love black cats. It should return {"cats": ["black"]}. """
        com = "i really love black cats."
        diccionario = {"cats": ["cat", "cats"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"cats": ["black"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_7(self):
        """ Given the comment: this is not a good example. It should return
        {"example": ["not", "good"]}. """
        com = "this is not a good example."
        diccionario = {"example": ["example"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"example": ["not", "good"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_8(self):
        """ Given the comment: They sent him the same, wrong item. It should return
        {"item": ["same", "wrong"]}. """
        com = "They sent him the same, wrong item."
        diccionario = {"item": ["item", "items"]}
        arbol = self.cliente.resolver_dependencias(com)
        etiquetas_pos = self.cliente.etiquetar_texto(com)
        lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
        diccionario_esperado = {"item": ["same", "wrong"]}
        dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_9(self):
        """ Tests with real comments. """
        com = "Usually I have good experiences with Amazon and its customer service reps, " \
              "but after todays online customer service chat I am horrified at some of the " \
              "people Amazon employs. Enter employee Ruchitha. I was trying to get a print " \
              "label for my roommate since he doesn't have Prime and isn't really internet " \
              "savvy. After he had bought a dvd that wasn't playable in the country, he " \
              "called customer service and a rep said they were going to send him the " \
              "correct one. They sent him the same, wrong item. So he had 2 returns to do."
        diccionario = {"experience": ["experiences", "experience"],
                       "Amazon": ["Amazon", "amazon"],
                       "item": ["item", "items"]}
        sentencias = nltk.sent_tokenize(com)
        dic_resultado = dict()
        for sentencia in sentencias:
            arbol = self.cliente.resolver_dependencias(sentencia)
            etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
            lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
            res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
            dic_resultado = self._combinar_dict(res, dic_resultado)
        diccionario_esperado = {"experience": ["good"], "Amazon": [], "item": ["same", "wrong"]}
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_extractor_10(self):
        """ Tests with real comments. """
        com = "There was a time I was a super-Amazon fan-boy, but these days, it is " \
              "finding new and innovated ways to anger their customers. I try to find the " \
              "best deal with products all the time and use what discounts where I can. " \
              "Apparently, AMZ does not like this and has taken to locking people out of " \
              "their ability to comment on products if they feel you are not paying the top " \
              "price. Today I had the simplest question about a feature on an item I bought " \
              "on AMZ, but cannot ask the question as apparently, I am persona non grata " \
              "these days. I got the product with a discount via research on the net."
        diccionario = {"fan-boy": ["fan-boy"],
                       "Amazon": ["Amazon", "amazon", "AMZ"],
                       "question": ["question"],
                       "thing": ["thing", "things"],
                       "way": ["way", "ways"],
                       "deal": ["deal", "deals"],
                       "price": ["prices", "price"]}
        sentencias = nltk.sent_tokenize(com)
        dic_resultado = dict()
        for sentencia in sentencias:
            arbol = self.cliente.resolver_dependencias(sentencia)
            etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
            lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
            res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
            dic_resultado = self._combinar_dict(res, dic_resultado)
        diccionario_esperado = {"fan-boy": ["super-Amazon"], "Amazon": ["good"],
                                "question": ["simple"], "thing": ["good"], "way": ["new"],
                                "deal": ["best"], "price": ["top"]}
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test__conj_1(self):
        """ Helper method for handling conjunctions that link a noun to an
        adverb/adjective. """
        lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None),
                         ('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None),
                         ('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None),
                         ('always', 'RB', None), ('received', 'VBD', None), ('my', 'PRP$', None),
                         ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None),
                         ('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None),
                         (',', ',', None), ('but', 'CC', None), ('no', 'DT', None),
                         ('more', 'JJR', None), ('!!', '.', None)]
        arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2), ('cop', 6, 3),
                                 ('det', 6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8),
                                 ('cc', 6, 9), ('advmod', 11, 10), ('conj', 6, 11),
                                 ('nmod:poss', 13, 12), ('dobj', 11, 13), ('case', 18, 14),
                                 ('det', 18, 15), ('amod', 18, 16), ('compound', 18, 17),
                                 ('nmod', 11, 18), ('punct', 6, 19), ('cc', 6, 20),
                                 ('neg', 22, 21), ('conj', 6, 22), ('punct', 6, 23)]
        diccionario_de_aspectos = {"Member": ["member"]}
        indice_raiz = 6
        indice_nodo = 22
        res_esperado = ("Member", "no more")
        res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem,
                                    diccionario_de_aspectos, arbol_de_dependencias)
        self.assertEqual(res_esperado, res)

    def test_extractor_11(self):
        """ Tests with real comments. """
        com = "Prime 2 day shipping seems to be a thing of the past. I have been a Prime " \
              "member for years and always received my merchandise in the desired time " \
              "frame, but no more!! I have had numerous conversations with customer service " \
              "and supervisors. All they do is give me the runaround and tell me their " \
              "policy has not changed. \"Two day shipping starts when the item leaves the " \
              "warehouse\". They can't ship if the items are not in their warehouses, seemly " \
              "blaming the vendors. Shame on you Amazon for not telling the truth. To save " \
              "money, Amazon no longer uses reliable trucking companies to move merchandise " \
              "from vendors warehousing to Amazon warehouses. But keep taking our membership " \
              "money for services you no longer can provide."
        diccionario = {"Member": ["member", "Member"], "Shipping": ["shipping", "Shipping"]}
        sentencias = nltk.sent_tokenize(com)
        dic_resultado = dict()
        for sentencia in sentencias:
            arbol = self.cliente.resolver_dependencias(sentencia)
            etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
            lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
            res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
            dic_resultado = self._combinar_dict(res, dic_resultado)
        diccionario_esperado = {"Member": ["Prime", "no more"], "Shipping": ["day"]}
        self.assertEqual(diccionario_esperado, dic_resultado)

    def test_quitar_palabras_1(self):
        """ Tests the quitar_palabras method. It should remove every word whose POS
        tag is not an adverb, adjective or negation. """
        texto = "do you dream of perfectly electric sheep, lately?"
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "perfectly electric lately"
        self.assertEqual(res, texto_esperado)

    def test_quitar_palabras_2(self):
        """ Tests the quitar_palabras method. It should remove every word whose POS
        tag is not an adverb, adjective or negation. """
        texto = "don't say no to cookies, never again"
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "n't no never again"
        self.assertEqual(res, texto_esperado)

    def test_quitar_palabras_3(self):
        """ Tests the quitar_palabras method. It should remove every word whose POS
        tag is not an adverb, adjective or negation. """
        texto = "black cats are really cute."
        res = self.ex.quitar_palabras(texto)
        texto_esperado = "black really cute"
        self.assertEqual(res, texto_esperado)

    def test__purgar_palabras_pos(self):
        """ Helper method that walks the list of tuples and removes the
        unnecessary words. """
        texto = "don't say no to cookies, never again"
        lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
        res = self.ex._purgar_palabras_pos(lista_pos_lem)
        tupla_esperada = [("n't", 'RB', "n't"), ('no', 'DT', None),
                          ('never', 'RB', "never"), ('again', 'RB', "again")]
        self.assertEqual(res, tupla_esperada)

    def test__unir_palabras(self):
        """ Helper method that joins the words of the list of tuples. """
        texto = "don't say no to cookies, never again"
        lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
        tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem)
        res = self.ex._unir_palabras(tupla_purgada)
        texto_esperado = "n't no never again"
        self.assertEqual(res, texto_esperado)

    def _combinar_dict(self, dict1, dict2):
        for llave in dict1:
            if llave in dict2.keys():
                dict2[llave].extend(dict1[llave])
            else:
                dict2[llave] = dict1[llave]
        return dict2

    def tearDown(self):
        self.cliente.cerrar_servicio()
        self.ex.cerrar()


if __name__ == "__main__":
    unittest.main()
I was trying to get a print out label for my roommate", "Prueba el método auxiliar que es usado para determinar si una palabra esta", "texto = \"don't say no to cookies, never again\" res = self.ex.quitar_palabras(texto) texto_esperado", "arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos = self.cliente.etiquetar_texto(sentencia) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol,", "res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test__nsub_2(self): \"\"\" Prueba", "7), ('nmod', 6, 8), ('cc', 6, 9), ('advmod', 11, 10), ('conj', 6, 11),", "('?', '.', None)] arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3,", "('advmod', 3, 9), ('punct', 3, 10)] res_esperado = \"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem,", "had bought a dvd that wasn't playable in the country, he called customer", "nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias: arbol = self.cliente.resolver_dependencias(sentencia) etiquetas_pos =", "es usado para buscar el lema o la palabra de una tupla pos_lem", "3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos = {\"example\":", "test__amod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra", "('member', 'NN', None), ('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None), ('always',", "= {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_4(self):", "you dream of perfectly electric sheep, lately?\" res = self.ex.quitar_palabras(texto) texto_esperado = \"perfectly", "que busca sujetos nominales. Debe encontrar el adjetivo y adverbio del sustantivo 'cats'.", "None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),", "has taken to locking people out of their ability to comment on products", "on products if they feel you are not paying the top price. Today", "more\") res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias) self.assertEqual(res_esperado, res) def test_extractor_11(self): \"\"\"", "item.\" diccionario = {\"item\":[\"item\", \"items\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem =", "one. They sent him the same, wrong item. So he had 2 returns", "6 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None), ('of',", "I have been a Prime member for years and always received my merchandise", "('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4,", "member for years and always received my merchandise in the desired time frame,", "dado una posición. Se espera que de la tupla en la posición 3,", "way to save a buck. 
But keep taking our membership money for services", "indice_raiz = 4 indice_nodo = 3 lista_pos_lem = [('im', 'VB', None), ('the', 'DT',", "('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6 indice_nodo = 1", "import cliente_corenlp from lematizador import lematizador import nltk class Test(unittest.TestCase): def setUp(self): self.ex", "\"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None), ('a',", "del aspecto 'comment' \"\"\" com = \"i am a valid comment.\" diccionario =", "reales \"\"\" com = \"Prime 2 day shipping seems to be a thing", "roommate since he doesn't have Prime and isn't really internet savvy. After he", "None), (\"'s\", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)] diccionario_de_aspectos = {\"sheep\":", "arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"item\":[\"same\",\"wrong\"]} dic_resultado", "es usado para determinar si una palabra esta en el diccionario de aspectos.", "\"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_3(self): \"\"\" Prueba el método", "Amazon for not telling the truth. To save money, Amazon no longer uses", "encuentra una dependencia con la etiqueta \"advmod\". Se espera que regrese el adverbio", "All they do is give me the runaround and tell me their policy", "('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)] diccionario_de_aspectos = {\"cats\":[\"cats\"]} indice_raiz", "lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self): \"\"\" Dado el siguiente comentario: do you dream", "def test_extractor_6(self): \"\"\" Dado el siguiente comentario: i really love black cats. Debe", "Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia", "('is', 'VBZ', None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example',", "que la palabra 'comment' sea determinado como aspecto 'comment'. \"\"\" palabra = 'comment'", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el método", "('punct', 6, 19), ('cc', 6, 20), ('neg', 22, 21), ('conj', 6, 22), ('punct',", "('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo,", "leaves the warehouse\\\". They can't ship if the items are not in their", "no longer uses reliable trucking companies to move merchandise from vendors warehousing to", "{\"cats\":[\"black\",\" really cute\"]} \"\"\" com = \"black cats are really cute.\" diccionario =", "sustantivo 'cats'. Se espera que devuelva ('cats', \"really cute\"). \"\"\" lista_pos_lem = [('black',", "long past. If AMZ is good at one thing these days, it is", "{\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias:", "el arbol de dependencias que resuelve el Stanford CoreNLP. 
Prueba que el método", "self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_4(self): \"\"\" Dado el siguiente comentario: ultimately,", "self.assertEqual(res_esperado, res) def test__nsub_2(self): \"\"\" Prueba el método auxiliar que busca sujetos nominales.", "my roommate since he doesn't have Prime and isn't really internet savvy. After", "= 'comment' diccionario = {\"comment\":[\"comment\", \"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def", "None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]}", "Se espera que de la tupla en la posición 3, devuelve la palabra", "negaciones. Debe encontrar la negacion del sustantivos 'example'. Se espera que devuelva ('example','not').", "5), ('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9), ('advmod', 11, 10),", "palabra = 'comment' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def", "('amod', 4, 3), ('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos = {\"not\":[\"ok\"]} res", "0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4), ('advmod', 6,", "has not changed. \\\"Two day shipping starts when the item leaves the warehouse\\\".", "comentario: this is not a good example. Debe devolver {\"example\":[\"not good\"]} \"\"\" com", "'RB', \"again\")] self.assertEqual(res, tupla_esperada) def test__unir_palabras(self): \"\"\" Método auxiliar que une las palabras", "siguiente comentario: They sent him the same, wrong item. Debe devolver {\"item\":[\"same\",\"wrong\"]} \"\"\"", "to do.\" diccionario = {\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for", "no more!! I have had numerous conversations with customer service and supervisors. All", "encuentra una dependencia con la etiqueta \"amod\". Se espera None \"\"\" indice_raiz =", "None), ('perfectly', 'RB', None), ('electric', 'JJ', None), ('sheep', 'NN', None), (',', ',', None),", "para eliminar las palabras innecesarias. 
\"\"\" texto = \"don't say no to cookies,", "'valid'), ('comment', 'NN', 'comment'), ('.', '.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz,", "provide.\" diccionario = {\"Member\":[\"member\",\"Member\"], \"Shipping\":[\"shipping\",\"Shipping\"], } sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for", "= self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"example\":[\"not\", \"good\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado)", "6, 5), ('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz = 6 indice_nodo", "keep taking our membership money for services you no longer can provide.\" diccionario", "('amod', 2, 1), ('nsubj', 5, 2), ('cop', 5, 3), ('advmod', 5, 4), ('punct',", "unittest from extractor import extractor_de_aspectos from cliente_corenlp import cliente_corenlp from lematizador import lematizador", "resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el método auxiliar que es usado para determinar", "self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos) res_esperado = (\"cyclone\", \"red\") self.assertEqual(res, res_esperado) def test__advmod_1(self): \"\"\"", "None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'), ('comment', 'NN', 'comment'),", "\"valid\") self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se", "is not a good example.\" diccionario = {\"example\":[\"example\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos =", "comentario: i am a valid comment. Debe devolver el adjetivo 'valid' del aspecto", "diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self): \"\"\"", "'comment'), ('.', '.', None)] diccionario_de_aspectos = {\"comment\":[\"comment\"]} res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)", "\"price\":[\"prices\", \"price\"],} sentencias = nltk.sent_tokenize(com) dic_resultado = dict() for sentencia in sentencias: arbol", "\"\"\" palabra = 'comment' diccionario = {\"comment\":[\"comment\", \"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\",", "a un sustantivo, debe regresar None. \"\"\" lista_pos_lem = [('this', 'DT', None), ('is',", "self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"cats\":[\"black\",\"really cute\"]} dic_resultado =", "lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"dream\":[\"lately\"], \"sheep\":[\"perfectly electric\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)", "ofrece cliente_corenlp.resolver_dependencias). 
\"\"\" com = \"i am a valid comment.\" diccionario = dict()", "indice = 3 resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado) def", "discount via research on the net.\" diccionario = {\"fan-boy\":[\"fan-boy\"],\"Amazon\":[\"Amazon\",\"amazon\",\"AMZ\"], \"question\":[\"question\"], \"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"],", "resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(None, resultado) def test__amod_1(self): \"\"\" Prueba el método auxiliar", "las conjunciones de un sustantivo a un adverbio/adjetivo \"\"\" lista_pos_lem = [('I', 'PRP',", "\"\"\" Dado el siguiente comentario: do you dream of perfectly electric sheep, lately?", "el siguiente comentario: do you dream of perfectly electric sheep, lately? Debe devolver", "resultado_esperado = 'be' self.assertEqual(resultado, resultado_esperado) def test__buscar_en_tupla_pos_lem_2(self): \"\"\" Prueba el método auxiliar que", "'VB', None), ('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.',", "todays online customer service chat I am horrified at some of the people", "tupla pos_lem dado una posición. Se espera que de la tupla en la", "AMZ, but cannot ask the question as apparently, I am persona non grata", "4, 2), ('amod', 4, 3), ('dobj', 1, 4), ('punct', 1, 5)] diccionario_de_aspectos =", "test__advmod_1(self): \"\"\" Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra", "devuelva None. \"\"\" palabra = 'review' diccionario = {\"comment\":[\"comment\"]} resultado = self.ex._es_aspecto(palabra, diccionario)", "lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"item\":[\"same\",\"wrong\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) print(diccionario_esperado) self.assertEqual(diccionario_esperado,", "\"\"\" com = \"They sent him the same, wrong item.\" diccionario = {\"item\":[\"item\",", "('det', 6, 4), ('amod', 6, 5), ('case', 8, 7), ('nmod', 6, 8), ('cc',", "prueba espera que al encontrar una dependencia amod que tiene su propio advmod,", "las palabras de la lista de tuplas. \"\"\" texto = \"don't say no", "Se espera que devuelva ('example','not'). \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ',", "reliable trucking companies to move merchandise from vendors warehousing to Amazon warehouses. They", "from cliente_corenlp import cliente_corenlp from lematizador import lematizador import nltk class Test(unittest.TestCase): def", "self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba el método auxiliar que es usado para", "espera que la palabra 'review' sea determinado como aspecto 'comment'. \"\"\" palabra =", "= lematizador.Lematizador() def test_extractor_recibe_arbol_de_dependencias(self): \"\"\" Para poder extraer los aspectos, primero se necesita", "self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que busca dependencias de", "experiences with Amazon and its customer service reps, but after todays online customer", "diccionario de aspectos. Se espera que la palabra 'comment' sea determinado como aspecto", "('cc', 6, 9), ('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12), ('dobj',", "auxiliar que busca negaciones. Debe encontrar la negacion del sustantivos 'example'. Se espera", "dependencia con la etiqueta \"advmod\". 
Se espera que regrese el adverbio del sustantivo", "runaround and tell me their policy has not changed. \\\"Two day shipping starts", "manejar las conjunciones de un sustantivo a un adverbio/adjetivo \"\"\" lista_pos_lem = [('I',", "these days. I got the product with a discount via research on the", "una palabra esta en el diccionario de aspectos. Se espera que la palabra", "3, 2), ('case', 7, 4), ('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3,", "etiqueta \"amod\". Se espera una tupla (\"comment\", \"valid\") \"\"\" indice_raiz = 5 indice_nodo", "it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"example\":[\"not\", \"good\"]} dic_resultado = self.ex.extraer(diccionario,", "etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"comment\":[\"valid\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "devuelva 'perfectly'. \"\"\" indice_nodo = 6 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP',", "el lema es None. \"\"\" tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'),", "self.lemas.lematizar_tuplas(etiquetas_pos) res = self.ex.extraer(diccionario, arbol, lista_pos_lem) dic_resultado = self._combinar_dict(res, dic_resultado) diccionario_esperado = {\"experience\":[\"good\"],", "Stanford CoreNLP. Prueba que el método extraer levante una excepcion si no recibe", "arbol_de_dependencias=arbol_de_dependencias) res_esperado = (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método", "('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] diccionario_de_aspectos = {\"cyclone\":[\"cyclone\"]} res", "persona non grata these days. I got the product with a discount via", "None. \"\"\" lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),", "= (\"Sheep\",\"perfectly electric\") self.assertEqual(res_esperado, res) def test_extraer_dependencia_doble_1(self): \"\"\" Prueba el método auxiliar que", "self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"sheep\":[\"ultimately\"]} dic_resultado = self.ex.extraer(diccionario,", "res_esperado = None self.assertEqual(res, res_esperado) def test__amod_4(self): \"\"\" Prueba el método auxiliar _extraer_dependencia", "extraer levante una excepcion si no recibe el arbol de aspectos en fora", "método auxiliar que es usado para determinar si una palabra esta en el", "arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_2(self): \"\"\" Dado el siguiente comentario: im the", "= 1 lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None),", "tenga una etiqueta POS de adverbio, sustantivo o negacion. \"\"\" texto = \"do", "item. So he had 2 returns to do.\" diccionario = {\"experience\":[\"experiences\",\"experience\"],\"Amazon\":[\"Amazon\",\"amazon\"], \"item\":[\"item\",\"items\"]} sentencias", "'VBN', None), ('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None), ('for', 'IN',", "busca negaciones. Debe encontrar la negacion del sustantivos 'example'. 
Se espera que devuelva", "'VBZ', None), ('not', 'RB', None), ('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN',", "{\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"sheep\":[\"ultimately\"]}", "have Prime and isn't really internet savvy. After he had bought a dvd", "\"ultimately, it's a sheep\" diccionario = {\"sheep\":[\"sheep\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com)", "2 res_esperado = (\"cats\", \"really cute\") res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)", "the red cyclone. Debe devolver {\"cyclone\":[\"red\"]} \"\"\" com = \"im the red cyclone.\"", "('been', 'VBN', None), ('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None), ('for',", "innovated ways to anger their customers. I try to find the best deal", "you dream of perfectly electric sheep, lately? indice_raiz = 3 indice_nodo = 9", "dict1, dict2): for llave in dict1: if llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave]", "('punct', 3, 10)] res_esperado = \"perfectly\" res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias) self.assertEqual(res_esperado, res)", "esta en el diccionario de aspectos. Se espera que la palabra 'review' sea", "= self.cliente.etiquetar_texto(com) lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos) diccionario_esperado = {\"example\":[\"not\", \"good\"]} dic_resultado = self.ex.extraer(diccionario, arbol,", "= 6 indice_nodo = 3 res_esperado = (\"example\", \"not\") res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo,", "out of their ability to comment on products if they feel you are", "se encuentra una dependencia con la etiqueta \"amod\". Se espera una tupla (\"cyclone\",", "diccionario_de_aspectos) res_esperado = (\"comment\", \"valid\") self.assertEqual(res, res_esperado) def test__amod_2(self): \"\"\" Prueba el método", "but after todays online customer service chat I am horrified at some of", "to find the best deal with products all the time and use what", "negacion. \"\"\" texto = \"do you dream of perfectly electric sheep, lately?\" res", "test_extractor_7(self): \"\"\" Dado el siguiente comentario: this is not a good example. Debe", "research on the net.\" diccionario = {\"fan-boy\":[\"fan-boy\"],\"Amazon\":[\"Amazon\",\"amazon\",\"AMZ\"], \"question\":[\"question\"], \"thing\":[\"thing\", \"things\"], \"way\":[\"way\",\"ways\"], \"deal\":[\"deal\",\"deals\"], \"price\":[\"prices\",", "'JJ', None), ('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None), ('?', '.',", "'NN', None), ('.', '.', None)] arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2),", "diccionario_esperado = {\"cyclone\":[\"red\"]} dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem) self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_3(self): \"\"\"", "11, 13), ('case', 18, 14), ('det', 18, 15), ('amod', 18, 16), ('compound', 18,", "etiqueta POS de adverbio, sustantivo o negacion. 
\"\"\" texto = \"black cats are", "if llave in dict2.keys(): dict2[llave].extend(dict1[llave]) else: dict2[llave] = dict1[llave] return dict2 def tearDown(self):", "6, 4), ('amod', 6, 5), ('punct', 6, 7)] diccionario_de_aspectos = {\"example\": [\"example\"]} indice_raiz", "return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar() if __name__ == \"__main__\": #import sys;sys.argv =", "valid comment.\" diccionario = {\"comment\":[\"comment\"]} arbol = self.cliente.resolver_dependencias(com) etiquetas_pos = self.cliente.etiquetar_texto(com) lista_pos_lem =", "my merchandise in the desired time frame, but no more!! I have had", "not telling the truth. To save money, Amazon no longer uses reliable trucking", "diccionario_esperado = {\"Member\":[\"Prime\", \"no more\"], \"Shipping\":[\"day\"], } self.assertEqual(diccionario_esperado, dic_resultado) def test_quitar_palabras(self): \"\"\" Prueba", "Debe encontrar el adjetivo y adverbio del sustantivo 'cats'. Se espera que devuelva", "'.', None)] arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2),", "la tupla en la posición 1, devuelve el lema 'be'. \"\"\" tupla_pos_lem =", "palabra = 'comment' diccionario = {\"comment\":[\"comment\", \"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado)", "'PRP$', None), ('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None), ('desired', 'JJ',", "('amod', 18, 16), ('compound', 18, 17), ('nmod', 11, 18), ('punct', 6, 19), ('cc',", "self.assertEqual(diccionario_esperado, dic_resultado) def test_extractor_8(self): \"\"\" Dado el siguiente comentario: They sent him the", "dict2[llave] = dict1[llave] return dict2 def tearDown(self): self.cliente.cerrar_servicio() self.ex.cerrar() if __name__ == \"__main__\":", "('the', 'DT', None), ('red', 'JJ', None), ('cyclone', 'NN', None), ('.', '.', None)] arbol_de_dependencias", "indice_raiz = 3 indice_nodo = 9 lista_pos_lem = [('do', 'VB', None), ('you', 'PRP',", "('punct', 6, 23)] diccionario_de_aspectos = {\"Member\":[\"member\"]} indice_raiz = 6 indice_nodo = 22 res_esperado", "resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem) resultado_esperado = 'a' self.assertEqual(resultado, resultado_esperado) def test__es_aspecto_1(self): \"\"\" Prueba", "se necesita pasar como argumento el arbol de dependencias que resuelve el Stanford", "diccionario = {\"comment\":[\"comment\", \"review\"]} resultado = self.ex._es_aspecto(palabra, diccionario) self.assertEqual(\"comment\", resultado) def test__es_aspecto_3(self): \"\"\"", "customer service and supervisors. All they do is give me the runaround and", "dependencia con la etiqueta \"amod\". Se espera None \"\"\" indice_raiz = 4 indice_nodo", "Debe encontrar la negacion del sustantivos 'example'. Se espera que devuelva ('example','not'). \"\"\"", "item leaves the warehouse\\\". They can't ship if the items are not in" ]
[ "np import csv import plot from sklearn.neural_network import BernoulliRBM def error(a, b): return", "model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20): for", "150] for item in nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10,", "= end def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples =", "[] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20):", "0: end = start + this_n if n_samples is not None: end =", "slice(start, end, None) start = end def reformat_data(data): return data.reshape((28, 28)) def run(train_data,", "_ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row in", "= n // n_packs if pack_num < n % n_packs: this_n += 1", "def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches", "def gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs < 1: raise ValueError(\"gen_even_slices", "reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_) for _ in row])) run(train_data,", "def percent_error(a, b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs,", "in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs < 1:", "import numpy as np import csv import plot from sklearn.neural_network import BernoulliRBM def", "end = start + this_n if n_samples is not None: end = min(n_samples,", "as f: reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_) for _ in", "_ in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0]))", "this_n += 1 if this_n > 0: end = start + this_n if", "percent_error(a, b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None):", "as f: reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _ in", "data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) /", "end) yield slice(start, end, None) start = end def reformat_data(data): return data.reshape((28, 28))", "< n % n_packs: this_n += 1 if this_n > 0: end =", "reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches =", "run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices =", "in reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader =", "def error(a, b): return (a != b).sum() def percent_error(a, b): return sum(error(a[i], b[i])", "if this_n > 0: end = start + this_n if n_samples is not", "for row in reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f:", "is not None: end = min(n_samples, end) yield slice(start, end, None) start =", 
"= [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row in reader:", "batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50, 75, 100, 150]", "if pack_num < n % n_packs: this_n += 1 if this_n > 0:", "batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item", "<reponame>JonasWechsler/NeuralNetsLab4<gh_stars>0 import numpy as np import csv import plot from sklearn.neural_network import BernoulliRBM", "yield slice(start, end, None) start = end def reformat_data(data): return data.reshape((28, 28)) def", "n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size,", "f: reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _ in row]))", "verbose=1) for _ in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors)", "% n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num", "28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size))", "start = 0 if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be", "if n_samples is not None: end = min(n_samples, end) yield slice(start, end, None)", "or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = [] test_data", "in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if", "nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for", "error(a, b): return (a != b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for", "from sklearn.neural_network import BernoulliRBM def error(a, b): return (a != b).sum() def percent_error(a,", "in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row in reader:", "f: reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_) for _ in row]))", "= [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in", "b): return (a != b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for i", "if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs)", "range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs < 1: raise", "learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20): for batch_slice in batch_slices:", "row in reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader", "< 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs) for pack_num", "item in nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, 
random_state=None,", "with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_) for", "nodes = [50, 75, 100, 150] for item in nodes: errors = []", "n_packs, n_samples=None): start = 0 if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s,", "== 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = [] test_data = []", "start = end def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples", "b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0 if", "\"__main__\": train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader =", "start + this_n if n_samples is not None: end = min(n_samples, end) yield", "= start + this_n if n_samples is not None: end = min(n_samples, end)", "n_samples is not None: end = min(n_samples, end) yield slice(start, end, None) start", "sklearn.neural_network import BernoulliRBM def error(a, b): return (a != b).sum() def percent_error(a, b):", "if item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\":", "return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start =", "open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _", "n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs) for", "row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_)", "None: end = min(n_samples, end) yield slice(start, end, None) start = end def", "in nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1)", "train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for", "errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item == 100:", "= list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50, 75, 100, 150] for", "end = min(n_samples, end) yield slice(start, end, None) start = end def reformat_data(data):", "np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples))", "for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n", "in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs:", "ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs) for pack_num in range(n_packs): this_n", "= np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches,", "% n_packs: this_n += 1 if this_n > 0: end = start +", "BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20): for batch_slice in", "n_packs: this_n += 1 if this_n > 0: end = start + this_n", ">=1\" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs if", "model.partial_fit(train_data[batch_slice]) 
errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item ==", "BernoulliRBM def error(a, b): return (a != b).sum() def percent_error(a, b): return sum(error(a[i],", "csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as", "plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ ==", "+= 1 if this_n > 0: end = start + this_n if n_samples", "* batch_size, n_batches, n_samples)) nodes = [50, 75, 100, 150] for item in", "item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = [] test_data =", "sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0", "n_packs) for pack_num in range(n_packs): this_n = n // n_packs if pack_num <", "plot from sklearn.neural_network import BernoulliRBM def error(a, b): return (a != b).sum() def", "min(n_samples, end) yield slice(start, end, None) start = end def reformat_data(data): return data.reshape((28,", "return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples)", "n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes", "75, 100, 150] for item in nodes: errors = [] model = BernoulliRBM(n_components=item,", "b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n,", "import plot from sklearn.neural_network import BernoulliRBM def error(a, b): return (a != b).sum()", "test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row in", "train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f)", "csv import plot from sklearn.neural_network import BernoulliRBM def error(a, b): return (a !=", "n_packs=%s, must be >=1\" % n_packs) for pack_num in range(n_packs): this_n = n", "for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row", "plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__", "in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or", "b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start", "plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if", "as np import csv import plot from sklearn.neural_network import BernoulliRBM def error(a, b):", "random_state=None, verbose=1) for _ in range(20): for batch_slice in batch_slices: 
model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data))", "batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice])", "reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _ in row])) with", "= 0 if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\"", "+ this_n if n_samples is not None: end = min(n_samples, end) yield slice(start,", "test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784))", "raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs) for pack_num in range(n_packs):", "> 0: end = start + this_n if n_samples is not None: end", "[] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row", "range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item", "numpy as np import csv import plot from sklearn.neural_network import BernoulliRBM def error(a,", "n % n_packs: this_n += 1 if this_n > 0: end = start", "not None: end = min(n_samples, end) yield slice(start, end, None) start = end", "for item in nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1,", "list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50, 75, 100, 150] for item", "n_batches, n_samples)) nodes = [50, 75, 100, 150] for item in nodes: errors", "n // n_packs if pack_num < n % n_packs: this_n += 1 if", "// n_packs if pack_num < n % n_packs: this_n += 1 if this_n", "100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = [] test_data = [] with", "1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" % n_packs) for pack_num in", "with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_) for", "[] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for row in reader: train_data.append(np.array([int(_)", "pack_num < n % n_packs: this_n += 1 if this_n > 0: end", "= int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes =", "i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs <", "item == 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data", "test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches", "plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv')", "gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs < 1: raise ValueError(\"gen_even_slices got", "pack_num in range(n_packs): this_n = n // n_packs if pack_num < n %", "n_packs if pack_num < n % n_packs: this_n += 1 
if this_n >", "0 if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must be >=1\" %", "__name__ == \"__main__\": train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f:", "open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f) for row in reader: test_data.append(np.array([int(_) for _", "batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches *", "reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv') as f: reader = csv.reader(f)", "be >=1\" % n_packs) for pack_num in range(n_packs): this_n = n // n_packs", "if __name__ == \"__main__\": train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as", "/ batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50, 75,", "end def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0]", "batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50, 75, 100,", "errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _", "None) start = end def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data): batch_size=10", "1 if this_n > 0: end = start + this_n if n_samples is", "this_n > 0: end = start + this_n if n_samples is not None:", "return (a != b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for i in", "[50, 75, 100, 150] for item in nodes: errors = [] model =", "this_n = n // n_packs if pack_num < n % n_packs: this_n +=", "import csv import plot from sklearn.neural_network import BernoulliRBM def error(a, b): return (a", "int(np.ceil(float(n_samples) / batch_size)) batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples)) nodes = [50,", "100, 150] for item in nodes: errors = [] model = BernoulliRBM(n_components=item, learning_rate=0.1,", "== 50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data =", "= [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader = csv.reader(f) for", "end, None) start = end def reformat_data(data): return data.reshape((28, 28)) def run(train_data, test_data):", "== \"__main__\": train_data = [] test_data = [] with open('binMNIST_data\\\\bindigit_trn.csv') as f: reader", "batch_size, n_batches, n_samples)) nodes = [50, 75, 100, 150] for item in nodes:", "= BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1, random_state=None, verbose=1) for _ in range(20): for batch_slice", "n_samples)) nodes = [50, 75, 100, 150] for item in nodes: errors =", "range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n", "must be >=1\" % n_packs) for pack_num in range(n_packs): this_n = n //", "(a != b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0]))", "= csv.reader(f) for row in reader: train_data.append(np.array([int(_) for _ in row])) with open('binMNIST_data\\\\bindigit_tst.csv')", "!= b).sum() def percent_error(a, b): return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0])) def", "for _ in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) 
errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0]))", "def run(train_data, test_data): batch_size=10 n_samples = np.array(train_data).shape[0] n_batches = int(np.ceil(float(n_samples) / batch_size)) batch_slices", "for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item ==", "n_iter=1, random_state=None, verbose=1) for _ in range(20): for batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data),", "import BernoulliRBM def error(a, b): return (a != b).sum() def percent_error(a, b): return", "= csv.reader(f) for row in reader: test_data.append(np.array([int(_) for _ in row])) run(train_data, test_data)", "batch_slice in batch_slices: model.partial_fit(train_data[batch_slice]) errors.append(percent_error(model.gibbs(test_data), test_data)) plot.plot_points(errors) plot.plot_heatmap(reformat_data(test_data[0])) plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0])) if item == 50", "= [50, 75, 100, 150] for item in nodes: errors = [] model", "for i in range(len(a)))/float(len(a)*len(a[0])) def gen_even_slices(n, n_packs, n_samples=None): start = 0 if n_packs", "this_n if n_samples is not None: end = min(n_samples, end) yield slice(start, end,", "n_samples=None): start = 0 if n_packs < 1: raise ValueError(\"gen_even_slices got n_packs=%s, must", "got n_packs=%s, must be >=1\" % n_packs) for pack_num in range(n_packs): this_n =", "= min(n_samples, end) yield slice(start, end, None) start = end def reformat_data(data): return", "50 or item == 100: plot.plot_heatmap(model.__dict__['components_'].reshape(item,784)) if __name__ == \"__main__\": train_data = []" ]
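The same training loop can be smoke-tested without the binMNIST CSV files or the local plot module. This is a minimal sketch on synthetic binary data, assuming only NumPy and scikit-learn, that prints the Gibbs-reconstruction error per epoch:

# Minimal self-contained check of minibatch RBM training on synthetic binary data.
import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(200, 784) > 0.8).astype(np.float64)   # stand-in for binarized MNIST rows

rbm = BernoulliRBM(n_components=50, learning_rate=0.1, batch_size=10, n_iter=1, random_state=0)
for epoch in range(5):
    for start in range(0, len(X), 10):
        rbm.partial_fit(X[start:start + 10])         # one gradient step per minibatch
    reconstruction = rbm.gibbs(X)                    # one Gibbs step: v -> h -> v'
    err = (reconstruction != X).mean()               # fraction of flipped pixels
    print(f"epoch {epoch}: reconstruction error {err:.3f}")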
[ "int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x): ''' Draw text in playfield.", "symbol == key.RIGHT: game_state.direction = (1, 0) if symbol == key.UP: game_state.direction =", "0) if symbol == key.RIGHT: game_state.direction = (1, 0) if symbol == key.UP:", "= time def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2,", "menu_text() if game_state.state == 'dead': dead_text() if game_state.state == 'game_over': game_over_text() def dead_text():", "draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300,", "with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80,", "game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key release. ''' if symbol ==", "def on_key_release(symbol, modifiers): ''' On key release. ''' if symbol == key.ENTER: game_state.keys.clear()", "y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text()", "x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0] < 0:", "= game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))", "symbol == key.DOWN: game_state.direction = (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0))", "y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1]", "('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2)", "= game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2,", "y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2),", "x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls", "''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text,", "game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 =", "* int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state = Game_state() game_state.draw_snake_parts() return", "x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30,", "size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, 
anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def", "anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65,", "text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0],", "# SNAKE GAME import pyglet from pyglet import gl from pyglet.window import key", "''' User press key for setting snake direction. ''' if symbol == key.LEFT:", "= (1, 0) if symbol == key.UP: game_state.direction = (0, 1) if symbol", "game_state.direction = (0, 1) if symbol == key.DOWN: game_state.direction = (0, -1) if", "on_key_release(symbol, modifiers): ''' On key release. ''' if symbol == key.ENTER: game_state.keys.clear() def", "draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state ==", "(x2, y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text() if game_state.state == 'game_over':", "= (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): '''", "def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑", "y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER',", "== key.RIGHT: game_state.direction = (1, 0) if symbol == key.UP: game_state.direction = (0,", "game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw()", "(x1, y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2", "anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples',", "size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t", "return game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]),", "On key release. ''' if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1,", "from pyglet import gl from pyglet.window import key from images_load import batch from", "size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.',", "dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in", "game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state", "in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] =", "size, anchor_x): ''' Draw text in playfield. 
''' text = pyglet.text.Label( text, font_name='Arial',", "game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1", "anchor_x): ''' Draw text in playfield. ''' text = pyglet.text.Label( text, font_name='Arial', font_size=size,", "x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1,", "game_over_text() def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def", "game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP);", "= (-1, 0) if symbol == key.RIGHT: game_state.direction = (1, 0) if symbol", "symbol == key.LEFT: game_state.direction = (-1, 0) if symbol == key.RIGHT: game_state.direction =", "if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key release.", "eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left')", "gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0]", "(0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On", "y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t)", "if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1", "def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4):", "yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115,", "key.RIGHT: game_state.direction = (1, 0) if symbol == key.UP: game_state.direction = (0, 1)", "= Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon.", "game_state.direction = (1, 0) if symbol == key.UP: game_state.direction = (0, 1) if", "t if time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over' and ('enter', 0)", "0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state = Game_state() game_state.draw_snake_parts()", "def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text():", "draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110,", "= game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] 
draw_polygon((x1, y1),", "= game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))", "import Game_state from field import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): '''", "import batch from game_state import Game_state from field import game_field time_to_move = [0.7]", "symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key release. '''", "y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left')", "for setting snake direction. ''' if symbol == key.LEFT: game_state.direction = (-1, 0)", "x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2,", "size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat", "''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1]));", "time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over' and", "int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x): '''", "import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User press key for", "game_state import Game_state from field import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers):", "- 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state = Game_state()", "field import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User press key", "if symbol == key.UP: game_state.direction = (0, 1) if symbol == key.DOWN: game_state.direction", "== key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0]", "game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2),", "y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2,", "key from images_load import batch from game_state import Game_state from field import game_field", "game_state.move(t) if game_state.state == 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time =", "''' Draw text in playfield. ''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x,", "xy2, xy3, xy4): ''' Draw polygon. 
''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]),", "anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t) if game_state.state", "from images_load import batch from game_state import Game_state from field import game_field time_to_move", "gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1]", "y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state == 'dead':", "← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16,", "game_state.direction = (-1, 0) if symbol == key.RIGHT: game_state.direction = (1, 0) if", "y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or", "import gl from pyglet.window import key from images_load import batch from game_state import", "Game_state from field import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User", "on_key_press(symbol, modifiers): ''' User press key for setting snake direction. ''' if symbol", "key for setting snake direction. ''' if symbol == key.LEFT: game_state.direction = (-1,", "game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1,", "time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over' and ('enter', 0) in game_state.keys:", "font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset()", "(x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0]", "anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t if", "def on_key_press(symbol, modifiers): ''' User press key for setting snake direction. ''' if", "y1), (x1, y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1]", "gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 =", "xy3, xy4): ''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1]));", "= [0.7] def on_key_press(symbol, modifiers): ''' User press key for setting snake direction.", "anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140,", "def draw_text(text, x, y, size, anchor_x): ''' Draw text in playfield. 
''' text", "pyglet import gl from pyglet.window import key from images_load import batch from game_state", "size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ←", "window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, ) pyglet.clock.schedule_interval(move, 1/30)", "walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)),", "(1, 0) if symbol == key.UP: game_state.direction = (0, 1) if symbol ==", "from game_state import Game_state from field import game_field time_to_move = [0.7] def on_key_press(symbol,", "pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, ) pyglet.clock.schedule_interval(move, 1/30) pyglet.clock.schedule_interval(game_state.add_food, 5)", "time def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3,", "< 0: game_state.move(t) if game_state.state == 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions()", "Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50,", "def move(t): time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t) if game_state.state ==", "playfield. ''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window", "modifiers): ''' User press key for setting snake direction. 
''' if symbol ==", "y2), (x2, y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text() if game_state.state ==", "draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center')", "if time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over' and ('enter', 0) in", "↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t", "game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw", "OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t):", "game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, ) pyglet.clock.schedule_interval(move, 1/30) pyglet.clock.schedule_interval(game_state.add_food, 5) pyglet.app.run()", "gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x): ''' Draw", "y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 =", "draw_text(text, x, y, size, anchor_x): ''' Draw text in playfield. ''' text =", "1) if symbol == key.DOWN: game_state.direction = (0, -1) if symbol == key.ENTER:", "y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2", "→', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat", "polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def", "Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16,", "if symbol == key.RIGHT: game_state.direction = (1, 0) if symbol == key.UP: game_state.direction", "int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size,", "y, size, anchor_x): ''' Draw text in playfield. ''' text = pyglet.text.Label( text,", "draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30,", "Draw text in playfield. 
''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y,", "pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state", "GAME import pyglet from pyglet import gl from pyglet.window import key from images_load", "size=20, anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t) if", "if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2,", "'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 *", "continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25,", "symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 =", "draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 =", "y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) x1 =", "direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left')", "def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20,", "or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70,", "gl.glEnd(); def draw_text(text, x, y, size, anchor_x): ''' Draw text in playfield. 
'''", "game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time", "right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16,", "1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2", "draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME", "size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70,", "on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2", "== 'dead': dead_text() if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue set", "game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center')", "key.DOWN: game_state.direction = (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol,", "ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0] <", "y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text() if", "draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python',", "draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1]));", "x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press", "move(t): time_to_move[0] -= t if time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over'", "0) if symbol == key.UP: game_state.direction = (0, 1) if symbol == key.DOWN:", "'dead': dead_text() if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue set right", "game_state.state == 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7 -", "x, y, size, anchor_x): ''' Draw text in playfield. 
''' text = pyglet.text.Label(", "if game_state.state == 'dead': dead_text() if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For", "key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1", "(x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text() if game_state.state", "game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100,", "= max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state", "release. ''' if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1)", "'game_over': game_over_text() def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center')", "reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4): '''", "draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300,", "game_state.state == 'dead': dead_text() if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue", "direction. ''' if symbol == key.LEFT: game_state.direction = (-1, 0) if symbol ==", "text in playfield. ''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x)", "== key.UP: game_state.direction = (0, 1) if symbol == key.DOWN: game_state.direction = (0,", "key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key release. ''' if symbol", "= pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, ) pyglet.clock.schedule_interval(move, 1/30) pyglet.clock.schedule_interval(game_state.add_food,", "pyglet.window import key from images_load import batch from game_state import Game_state from field", "symbol == key.UP: game_state.direction = (0, 1) if symbol == key.DOWN: game_state.direction =", "key.UP: game_state.direction = (0, 1) if symbol == key.DOWN: game_state.direction = (0, -1)", "if symbol == key.DOWN: game_state.direction = (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter',", "1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 =", "y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes),", "from pyglet.window import key from images_load import batch from game_state import Game_state from", "x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0]", "''' On key release. 
''' if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT)", "and ('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3,", "Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. '''", "x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1,", "gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y,", "0.2) time_to_move[0] = time def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def", "game_state def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1]));", "anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text():", "== key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key release. ''' if", "↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16, anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left')", "in playfield. ''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw()", "(x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text()", "(-1, 0) if symbol == key.RIGHT: game_state.direction = (1, 0) if symbol ==", "= game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1),", "y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left') draw_text(str(game_state.lifes), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left')", "snake direction. 
''' if symbol == key.LEFT: game_state.direction = (-1, 0) if symbol", "if game_state.state == 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7", "x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw,", "game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2),", "= game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) batch.draw() menu_text() if", "game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def", "menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →',", "anchor_x='left') draw_text('Eat Apples', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300,", "-= t if time_to_move[0] < 0: game_state.move(t) if game_state.state == 'game_over' and ('enter',", "SNAKE GAME import pyglet from pyglet import gl from pyglet.window import key from", "gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x): ''' Draw text in", "key release. ''' if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1,", "game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User press key for setting", "def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4) x1 = game_field.origin_xy0_game_field[0] y1 = game_field.origin_xy0_game_field[1]", "modifiers): ''' On key release. 
''' if symbol == key.ENTER: game_state.keys.clear() def on_draw():", "anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30,", "== key.LEFT: game_state.direction = (-1, 0) if symbol == key.RIGHT: game_state.direction = (1,", "images_load import batch from game_state import Game_state from field import game_field time_to_move =", "pyglet from pyglet import gl from pyglet.window import key from images_load import batch", "-1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers): ''' On key", "== key.DOWN: game_state.direction = (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def", "import key from images_load import batch from game_state import Game_state from field import", "key.LEFT: game_state.direction = (-1, 0) if symbol == key.RIGHT: game_state.direction = (1, 0)", "game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1,", "dead_text() if game_state.state == 'game_over': game_over_text() def dead_text(): draw_text('For continue set right direction',", "y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -=", "time_to_move[0] = time def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state def draw_polygon(xy1,", "Draw polygon. ''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd();", "y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 = game_field.origin_xy1_menu[1] draw_polygon((x1, y1), (x1, y2),", "size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2,", "game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) x1", "set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130,", "draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def move(t): time_to_move[0] -= t if time_to_move[0]", "y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press,", "User press key for setting snake direction. 
''' if symbol == key.LEFT: game_state.direction", "''' if symbol == key.ENTER: game_state.keys.clear() def on_draw(): gl.glClear(gl.GL_COLOR_BUFFER_BIT) gl.glColor3f(1, 1, 1) gl.glLineWidth(4)", "from field import game_field time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User press", "time_to_move = [0.7] def on_key_press(symbol, modifiers): ''' User press key for setting snake", "= game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1)) x1 = game_field.origin_xy0_menu[0]", "[0.7] def on_key_press(symbol, modifiers): ''' User press key for setting snake direction. '''", "y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with", "setting snake direction. ''' if symbol == key.LEFT: game_state.direction = (-1, 0) if", "''' if symbol == key.LEFT: game_state.direction = (-1, 0) if symbol == key.RIGHT:", "press key for setting snake direction. ''' if symbol == key.LEFT: game_state.direction =", "x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-65, size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER',", "== 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05", "gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x):", "== 'game_over': game_over_text() def dead_text(): draw_text('For continue set right direction', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30,", "batch.draw() menu_text() if game_state.state == 'dead': dead_text() if game_state.state == 'game_over': game_over_text() def", "game_state.direction = (0, -1) if symbol == key.ENTER: game_state.keys.append(('enter', 0)) def on_key_release(symbol, modifiers):", "gl from pyglet.window import key from images_load import batch from game_state import Game_state", "text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, ) pyglet.clock.schedule_interval(move,", "= pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1])", "0: game_state.move(t) if game_state.state == 'game_over' and ('enter', 0) in game_state.keys: game_state.restart_conditions() time", "draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') draw_text('Press ENTER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-140, size=20, anchor_x='center') def", "time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset():", "xy4): ''' Draw polygon. 
''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]),", "if symbol == key.LEFT: game_state.direction = (-1, 0) if symbol == key.RIGHT: game_state.direction", "size=30, anchor_x='left') draw_text(str(len(game_state.snake_xy)), x=game_field.origin_xy1_menu[0]-70, y=game_field.origin_xy1_menu[1]-115, size=30, anchor_x='left') def game_over_text(): draw_text('GAME OVER', x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100,", "font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers(", "x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2,", "anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓", "gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]), int(xy2[1])); gl.glVertex2f(int(xy3[0]), int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x,", "x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-80, size=16, anchor_x='left') draw_text('Don\\'t eat walls or yourself.', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-110, size=16, anchor_x='left')", "0)) def on_key_release(symbol, modifiers): ''' On key release. ''' if symbol == key.ENTER:", "0) in game_state.keys: game_state.restart_conditions() time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0]", "def draw_polygon(xy1, xy2, xy3, xy4): ''' Draw polygon. 
''' gl.glBegin(gl.GL_LINE_LOOP); gl.glVertex2f(int(xy1[0]), int(xy1[1])); gl.glVertex2f(int(xy2[0]),", "y1)) batch.draw() menu_text() if game_state.state == 'dead': dead_text() if game_state.state == 'game_over': game_over_text()", "(0, 1) if symbol == key.DOWN: game_state.direction = (0, -1) if symbol ==", "x=game_field.size_window()[0]//2, y=game_field.size_window()[1]//2-100, size=30, anchor_x='center') def menu_text(): draw_text('in Python', x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move", "''' text = pyglet.text.Label( text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window =", "(x2, y1)) x1 = game_field.origin_xy0_menu[0] y1 = game_field.origin_xy0_menu[1] x2 = game_field.origin_xy1_menu[0] y2 =", "int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state = Game_state() game_state.draw_snake_parts() return game_state", "max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2) time_to_move[0] = time def reset(): game_state =", "text, font_name='Arial', font_size=size, x=x, y=y, anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state =", "anchor_x=anchor_x) text.draw() window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1]) game_state = reset() window.push_handlers( on_draw=on_draw, on_key_press=on_key_press, )", "import pyglet from pyglet import gl from pyglet.window import key from images_load import", "= (0, 1) if symbol == key.DOWN: game_state.direction = (0, -1) if symbol", "batch from game_state import Game_state from field import game_field time_to_move = [0.7] def", "int(xy3[1])); gl.glVertex2f(int(xy4[0]), int(xy4[1])); gl.glEnd(); def draw_text(text, x, y, size, anchor_x): ''' Draw text", "= game_field.origin_xy0_game_field[1] x2 = game_field.origin_xy1_game_field[0] y2 = game_field.origin_xy1_game_field[1] draw_polygon((x1, y1), (x1, y2), (x2,", "x=game_field.origin_xy0_menu[0]+25, y=game_field.origin_xy1_menu[1]-130, size=16, anchor_x='left') draw_text('Move with ← ↓ ↑ →', x=game_field.origin_xy0_menu[0]+300, y=game_field.origin_xy1_menu[1]-50, size=16," ]
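# Timing, as read from move() above: pyglet calls move() roughly every 1/30 s, but the
# snake only advances once the time_to_move countdown has expired. After each step the
# countdown is reset to max(0.7 - 0.05 * len(snake) / 3, 0.2) seconds, so for example a
# 12-segment snake steps every max(0.7 - 0.2, 0.2) = 0.5 s, and the delay bottoms out
# at 0.2 s once the snake reaches 30 segments.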
[ "sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp') annotator.annotate_directory(input_data_dir, output_data_dir, id_key=\"id\",", "= sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp') annotator.annotate_directory(input_data_dir, output_data_dir,", "<reponame>forkunited/ltprg import sys import mung.nlp.corenlp input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator =", "import mung.nlp.corenlp input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents',", "sys import mung.nlp.corenlp input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]',", "input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp') annotator.annotate_directory(input_data_dir,", "output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp') annotator.annotate_directory(input_data_dir, output_data_dir, id_key=\"id\", batch=100)", "mung.nlp.corenlp input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp')", "import sys import mung.nlp.corenlp input_data_dir = sys.argv[1] output_data_dir = sys.argv[2] annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state," ]
[ "path) self.notifier = Notifier(self._bus) self._auto = False self._als = AmbientLightSensor() self._br_ctrl = BrightnessCtrl(self._bus)", "self._auto @auto.setter def auto(self, value): self._auto = value self.notifier.auto_brightness(self._auto) if self._auto: self._als.start() self._br_ctrl.start()", "brightnessctrl import BrightnessCtrl class AutoBrightnessService(dbus.service.Object): def __init__(self): path = '/com/github/sheinz/autobrightness' bus_loop = DBusQtMainLoop(set_as_default=True)", "self.notifier = Notifier(self._bus) self._auto = False self._als = AmbientLightSensor() self._br_ctrl = BrightnessCtrl(self._bus) self._process_timer", "import sys import signal from PyQt4 import QtCore from dbus.mainloop.qt import DBusQtMainLoop from", "signal from PyQt4 import QtCore from dbus.mainloop.qt import DBusQtMainLoop from notifier import Notifier", "= value self.notifier.auto_brightness(self._auto) if self._auto: self._als.start() self._br_ctrl.start() self._process_timer.start(1000) else: self._als.stop() self._br_ctrl.stop() self._process_timer.stop() def", "= QtCore.QTimer() self._process_timer.timeout.connect(self.process) @property def auto(self): return self._auto @auto.setter def auto(self, value): self._auto", "python import dbus import dbus.service import sys import signal from PyQt4 import QtCore", "= self._br_ctrl.screen_brightness_up() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def down(self): value = self._br_ctrl.screen_brightness_down() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def auto_toggle(self):", "self._bus = dbus.SessionBus(mainloop=bus_loop) name = dbus.service.BusName('com.github.sheinz.autobrightness', bus=self._bus) dbus.service.Object.__init__(self, name, path) self.notifier = Notifier(self._bus)", "def up(self): value = self._br_ctrl.screen_brightness_up() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def down(self): value = self._br_ctrl.screen_brightness_down() self.notifier.brightness(value)", "auto_toggle(self): self.auto = not self.auto @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def exit(self): sys.exit() class Application(QtCore.QCoreApplication): def __init__(self,", "from PyQt4 import QtCore from dbus.mainloop.qt import DBusQtMainLoop from notifier import Notifier from", "self._br_ctrl = BrightnessCtrl(self._bus) self._process_timer = QtCore.QTimer() self._process_timer.timeout.connect(self.process) @property def auto(self): return self._auto @auto.setter", "app = Application(sys.argv) app.startTimer(1000) signal.signal(signal.SIGINT, lambda *args: app.quit()) sys.exit(app.exec_()) if __name__ == \"__main__\":", "from dbus.mainloop.qt import DBusQtMainLoop from notifier import Notifier from als import AmbientLightSensor from", "= self._als.get_value() print('Light sensor: %d' % value) if value == 0: value =", "= BrightnessCtrl(self._bus) self._process_timer = QtCore.QTimer() self._process_timer.timeout.connect(self.process) @property def auto(self): return self._auto @auto.setter def", "= Application(sys.argv) app.startTimer(1000) signal.signal(signal.SIGINT, lambda *args: app.quit()) sys.exit(app.exec_()) if __name__ == \"__main__\": main()", "up(self): value = self._br_ctrl.screen_brightness_up() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def down(self): value = 
self._br_ctrl.screen_brightness_down() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')", "return super(Application, self).event(e) def quit(self): self._auto_br.stop() super(Application, self).quit() def main(): app = Application(sys.argv)", "DBusQtMainLoop(set_as_default=True) self._bus = dbus.SessionBus(mainloop=bus_loop) name = dbus.service.BusName('com.github.sheinz.autobrightness', bus=self._bus) dbus.service.Object.__init__(self, name, path) self.notifier =", "Notifier(self._bus) self._auto = False self._als = AmbientLightSensor() self._br_ctrl = BrightnessCtrl(self._bus) self._process_timer = QtCore.QTimer()", "self._process_timer.stop() def process(self): value = self._als.get_value() print('Light sensor: %d' % value) if value", "class Application(QtCore.QCoreApplication): def __init__(self, argv): super(Application, self).__init__(argv) self._auto_br = AutoBrightnessService() def event(self, e):", "super(Application, self).__init__(argv) self._auto_br = AutoBrightnessService() def event(self, e): return super(Application, self).event(e) def quit(self):", "= not self.auto @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def exit(self): sys.exit() class Application(QtCore.QCoreApplication): def __init__(self, argv): super(Application,", "AutoBrightnessService() def event(self, e): return super(Application, self).event(e) def quit(self): self._auto_br.stop() super(Application, self).quit() def", "def auto(self, value): self._auto = value self.notifier.auto_brightness(self._auto) if self._auto: self._als.start() self._br_ctrl.start() self._process_timer.start(1000) else:", "down(self): value = self._br_ctrl.screen_brightness_down() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def auto_toggle(self): self.auto = not self.auto @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')", "stop(self): self._process_timer.stop() self._als.stop() self._br_ctrl.stop() @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def up(self): value = self._br_ctrl.screen_brightness_up() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def", "argv): super(Application, self).__init__(argv) self._auto_br = AutoBrightnessService() def event(self, e): return super(Application, self).event(e) def", "Application(QtCore.QCoreApplication): def __init__(self, argv): super(Application, self).__init__(argv) self._auto_br = AutoBrightnessService() def event(self, e): return", "self._process_timer.stop() self._als.stop() self._br_ctrl.stop() @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def up(self): value = self._br_ctrl.screen_brightness_up() self.notifier.brightness(value) @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness') def down(self):", "def __init__(self, argv): super(Application, self).__init__(argv) self._auto_br = AutoBrightnessService() def event(self, e): return super(Application,", "super(Application, self).event(e) def quit(self): self._auto_br.stop() super(Application, self).quit() def main(): app = Application(sys.argv) app.startTimer(1000)", "self._als.start() self._br_ctrl.start() self._process_timer.start(1000) else: self._als.stop() self._br_ctrl.stop() self._process_timer.stop() def process(self): value = self._als.get_value() print('Light", "if self._auto: self._als.start() self._br_ctrl.start() 
#!/usr/bin/env python
import dbus
import dbus.service
import sys
import signal

from PyQt4 import QtCore
from dbus.mainloop.qt import DBusQtMainLoop

from notifier import Notifier
from als import AmbientLightSensor
from brightnessctrl import BrightnessCtrl


class AutoBrightnessService(dbus.service.Object):

    def __init__(self):
        path = '/com/github/sheinz/autobrightness'
        bus_loop = DBusQtMainLoop(set_as_default=True)
        self._bus = dbus.SessionBus(mainloop=bus_loop)
        name = dbus.service.BusName('com.github.sheinz.autobrightness',
                                    bus=self._bus)
        dbus.service.Object.__init__(self, name, path)
        self.notifier = Notifier(self._bus)
        self._auto = False
        self._als = AmbientLightSensor()
        self._br_ctrl = BrightnessCtrl(self._bus)
        self._process_timer = QtCore.QTimer()
        self._process_timer.timeout.connect(self.process)

    @property
    def auto(self):
        return self._auto

    @auto.setter
    def auto(self, value):
        self._auto = value
        self.notifier.auto_brightness(self._auto)
        if self._auto:
            self._als.start()
            self._br_ctrl.start()
            self._process_timer.start(1000)
        else:
            self._als.stop()
            self._br_ctrl.stop()
            self._process_timer.stop()

    def process(self):
        value = self._als.get_value()
        print('Light sensor: %d' % value)
        if value == 0:
            value = 1
        self._br_ctrl.set_screen_brightness(value)
        if value < 5:
            self._br_ctrl.set_keyboard_light(True)
        else:
            self._br_ctrl.set_keyboard_light(False)

    def stop(self):
        self._process_timer.stop()
        self._als.stop()
        self._br_ctrl.stop()

    @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
    def up(self):
        value = self._br_ctrl.screen_brightness_up()
        self.notifier.brightness(value)

    @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
    def down(self):
        value = self._br_ctrl.screen_brightness_down()
        self.notifier.brightness(value)

    @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
    def auto_toggle(self):
        self.auto = not self.auto

    @dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
    def exit(self):
        sys.exit()


class Application(QtCore.QCoreApplication):

    def __init__(self, argv):
        super(Application, self).__init__(argv)
        self._auto_br = AutoBrightnessService()

    def event(self, e):
        return super(Application, self).event(e)

    def quit(self):
        self._auto_br.stop()
        super(Application, self).quit()


def main():
    app = Application(sys.argv)
    app.startTimer(1000)
    signal.signal(signal.SIGINT, lambda *args: app.quit())
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
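# --- Illustrative client sketch (not part of the original service file). ---
# It assumes the AutoBrightnessService above is already running on the session
# bus; the bus name, object path and interface string are taken from the code
# above, everything else here is made up for demonstration.
import dbus

session_bus = dbus.SessionBus()
proxy = session_bus.get_object('com.github.sheinz.autobrightness',
                               '/com/github/sheinz/autobrightness')
service = dbus.Interface(proxy,
                         dbus_interface='com.github.sheinz.autobrightness')

service.up()           # step the screen brightness up and show a notification
service.auto_toggle()  # flip the ambient-light-driven automatic mode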
[ "code:: @cache(times=3) def some_function(): pass Would give out cached value up to `times`", "def some_function(): pass Would give out cached value up to `times` number only.", "value up to `times` number only. Example:: @cache(times=2) def f(): return input('? ')", "previous value '1' >> f() # but use it up to two times", ">> f() # but use it up to two times only '1' >>", "you wrote a cache function that remembers other function output value. Modify it", "in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -=", "cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1 return", "Would give out cached value up to `times` number only. Example:: @cache(times=2) def", "{} def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs)", "f(): return input('? ') # careful with input() in python2, use raw_input() instead", "in python2, use raw_input() instead >> f() ? 1 '1' >> f() #", "remember previous value '1' >> f() # but use it up to two", "? 1 '1' >> f() # will remember previous value '1' >> f()", "str(bound.arguments) if key not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1]", "Callable) -> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key =", "= str(bound.arguments) if key not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if", "two times only '1' >> f() ? 2 '2' \"\"\" import inspect from", "cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return result return", "use raw_input() instead >> f() ? 1 '1' >> f() # will remember", "up to two times only '1' >> f() ? 2 '2' \"\"\" import", "# but use it up to two times only '1' >> f() ?", "= inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key not in cached_values: cached_values[key]", "use it up to two times only '1' >> f() ? 2 '2'", "the following code:: @cache(times=3) def some_function(): pass Would give out cached value up", "it to be a parametrized decorator, so that the following code:: @cache(times=3) def", "only. Example:: @cache(times=2) def f(): return input('? ') # careful with input() in", "\"\"\" import inspect from typing import Callable def cache(times: int) -> Callable: \"\"\"Cache", "'1' >> f() ? 2 '2' \"\"\" import inspect from typing import Callable", "f() # but use it up to two times only '1' >> f()", "give out cached value up to `times` number only. Example:: @cache(times=2) def f():", "parametrized decorator, so that the following code:: @cache(times=3) def some_function(): pass Would give", "bound.apply_defaults() key = str(bound.arguments) if key not in cached_values: cached_values[key] = [func(*args, **kwargs),", "value '1' >> f() # but use it up to two times only", "if key not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] >", "return input('? ') # careful with input() in python2, use raw_input() instead >>", "python2, use raw_input() instead >> f() ? 1 '1' >> f() # will", "wrote a cache function that remembers other function output value. Modify it to", "4, you wrote a cache function that remembers other function output value. 
Modify", "following code:: @cache(times=3) def some_function(): pass Would give out cached value up to", "decorator, so that the following code:: @cache(times=3) def some_function(): pass Would give out", "import Callable def cache(times: int) -> Callable: \"\"\"Cache decorator which returns func result", "1 '1' >> f() # will remember previous value '1' >> f() #", "f() # will remember previous value '1' >> f() # but use it", "2 '2' \"\"\" import inspect from typing import Callable def cache(times: int) ->", "def cache(times: int) -> Callable: \"\"\"Cache decorator which returns func result n times\"\"\"", "return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return result return wrapper return _cache", "cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key]", "up to `times` number only. Example:: @cache(times=2) def f(): return input('? ') #", "with input() in python2, use raw_input() instead >> f() ? 1 '1' >>", "[func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0] result", "-= 1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return result return wrapper", "times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0]", "> 1: cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return", "def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key", "= {} def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args,", "\"\"\"Cache decorator which returns func result n times\"\"\" cached_values = {} def _cache(func:", "= [func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0]", "cached_values = {} def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound =", "times\"\"\" cached_values = {} def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound", "will remember previous value '1' >> f() # but use it up to", "remembers other function output value. Modify it to be a parametrized decorator, so", "output value. Modify it to be a parametrized decorator, so that the following", "some_function(): pass Would give out cached value up to `times` number only. Example::", "f() ? 1 '1' >> f() # will remember previous value '1' >>", "to `times` number only. Example:: @cache(times=2) def f(): return input('? ') # careful", "def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults()", "homework task 4, you wrote a cache function that remembers other function output", "input('? 
') # careful with input() in python2, use raw_input() instead >> f()", "typing import Callable def cache(times: int) -> Callable: \"\"\"Cache decorator which returns func", "@cache(times=3) def some_function(): pass Would give out cached value up to `times` number", "func result n times\"\"\" cached_values = {} def _cache(func: Callable) -> Callable: def", "if cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0] del", "to be a parametrized decorator, so that the following code:: @cache(times=3) def some_function():", "careful with input() in python2, use raw_input() instead >> f() ? 1 '1'", "decorator which returns func result n times\"\"\" cached_values = {} def _cache(func: Callable)", "f() ? 2 '2' \"\"\" import inspect from typing import Callable def cache(times:", "# will remember previous value '1' >> f() # but use it up", "from typing import Callable def cache(times: int) -> Callable: \"\"\"Cache decorator which returns", "Modify it to be a parametrized decorator, so that the following code:: @cache(times=3)", "result n times\"\"\" cached_values = {} def _cache(func: Callable) -> Callable: def wrapper(*args,", "Callable: \"\"\"Cache decorator which returns func result n times\"\"\" cached_values = {} def", "-> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments)", "to two times only '1' >> f() ? 2 '2' \"\"\" import inspect", "key not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] > 1:", "def f(): return input('? ') # careful with input() in python2, use raw_input()", "bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key not in cached_values:", ">> f() ? 1 '1' >> f() # will remember previous value '1'", "function output value. Modify it to be a parametrized decorator, so that the", "# careful with input() in python2, use raw_input() instead >> f() ? 1", "? 2 '2' \"\"\" import inspect from typing import Callable def cache(times: int)", "function that remembers other function output value. Modify it to be a parametrized", "import inspect from typing import Callable def cache(times: int) -> Callable: \"\"\"Cache decorator", "so that the following code:: @cache(times=3) def some_function(): pass Would give out cached", "'1' >> f() # but use it up to two times only '1'", "Example:: @cache(times=2) def f(): return input('? ') # careful with input() in python2,", "cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1", "inspect from typing import Callable def cache(times: int) -> Callable: \"\"\"Cache decorator which", "out cached value up to `times` number only. Example:: @cache(times=2) def f(): return", "In previous homework task 4, you wrote a cache function that remembers other", "') # careful with input() in python2, use raw_input() instead >> f() ?", "but use it up to two times only '1' >> f() ? 2", "`times` number only. Example:: @cache(times=2) def f(): return input('? ') # careful with", "**kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key not in", "number only. Example:: @cache(times=2) def f(): return input('? ') # careful with input()", "pass Would give out cached value up to `times` number only. 
Example:: @cache(times=2)", "inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key not in cached_values: cached_values[key] =", "that remembers other function output value. Modify it to be a parametrized decorator,", "a parametrized decorator, so that the following code:: @cache(times=3) def some_function(): pass Would", "not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1]", "only '1' >> f() ? 2 '2' \"\"\" import inspect from typing import", "value. Modify it to be a parametrized decorator, so that the following code::", "raw_input() instead >> f() ? 1 '1' >> f() # will remember previous", "cached value up to `times` number only. Example:: @cache(times=2) def f(): return input('?", "1: cached_values[key][1] -= 1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return result", "returns func result n times\"\"\" cached_values = {} def _cache(func: Callable) -> Callable:", "wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if key not", "cache(times: int) -> Callable: \"\"\"Cache decorator which returns func result n times\"\"\" cached_values", "previous homework task 4, you wrote a cache function that remembers other function", "other function output value. Modify it to be a parametrized decorator, so that", "n times\"\"\" cached_values = {} def _cache(func: Callable) -> Callable: def wrapper(*args, **kwargs):", "a cache function that remembers other function output value. Modify it to be", "'1' >> f() # will remember previous value '1' >> f() # but", "task 4, you wrote a cache function that remembers other function output value.", "be a parametrized decorator, so that the following code:: @cache(times=3) def some_function(): pass", "_cache(func: Callable) -> Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key", "'2' \"\"\" import inspect from typing import Callable def cache(times: int) -> Callable:", "which returns func result n times\"\"\" cached_values = {} def _cache(func: Callable) ->", "int) -> Callable: \"\"\"Cache decorator which returns func result n times\"\"\" cached_values =", "**kwargs), times+1] if cached_values[key][1] > 1: cached_values[key][1] -= 1 return cached_values[key][0] result =", "that the following code:: @cache(times=3) def some_function(): pass Would give out cached value", "@cache(times=2) def f(): return input('? ') # careful with input() in python2, use", "**kwargs) bound.apply_defaults() key = str(bound.arguments) if key not in cached_values: cached_values[key] = [func(*args,", "cache function that remembers other function output value. Modify it to be a", "instead >> f() ? 1 '1' >> f() # will remember previous value", ">> f() # will remember previous value '1' >> f() # but use", "-> Callable: \"\"\"Cache decorator which returns func result n times\"\"\" cached_values = {}", ">> f() ? 2 '2' \"\"\" import inspect from typing import Callable def", "input() in python2, use raw_input() instead >> f() ? 1 '1' >> f()", "Callable: def wrapper(*args, **kwargs): bound = inspect.signature(func).bind(*args, **kwargs) bound.apply_defaults() key = str(bound.arguments) if", "times only '1' >> f() ? 
2 '2' \"\"\" import inspect from typing", "1 return cached_values[key][0] result = cached_values[key][0] del cached_values[key] return result return wrapper return", "Callable def cache(times: int) -> Callable: \"\"\"Cache decorator which returns func result n", "\"\"\" In previous homework task 4, you wrote a cache function that remembers", "it up to two times only '1' >> f() ? 2 '2' \"\"\"", "key = str(bound.arguments) if key not in cached_values: cached_values[key] = [func(*args, **kwargs), times+1]" ]
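# --- Illustrative usage sketch (not part of the original exercise file). ---
# input() makes the counting behaviour awkward to demonstrate, so this sketch
# caches a deterministic counter instead; the function name and values below
# are made up, only the @cache decorator comes from the code above.
import itertools

_counter = itertools.count()


@cache(times=2)
def next_value():
    return next(_counter)


print(next_value())  # 0 -- computed and cached
print(next_value())  # 0 -- served from the cache
print(next_value())  # 0 -- last reuse, the entry is dropped afterwards
print(next_value())  # 1 -- recomputed on the following call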
[ "time import createVm import main import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid):", ": {'ImageNames' : [name_filter]}} if name_filter is not None else {} groups =", "name_filter is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for", "for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g", "def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is", "ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\",", "self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection =", "= [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection", "def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None): filter = {'Filters'", "g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else \"Me\"]) self.values = values", "import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args,", "in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else \"Me\"])", "**keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line):", "**keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection", "createVm import main import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self,", "def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\",", "popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames'", "on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None): filter = {'Filters' :", "[\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def", "[name_filter]}} if name_filter is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values =", "\"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None): filter =", "if name_filter is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list()", "list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in", "\"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self,", "on_selection def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter", "{} groups = 
main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups: values.append([g['ImageName'], g['ImageId'],", "super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form,", "self.on_selection = on_selection def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}}", "class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\",", "values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else \"Me\"]) self.values =", "virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles =", "<reponame>outscale-mdr/osc-tui import npyscreen import pyperclip import time import createVm import main import popup", "= main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'],", "import pyperclip import time import createVm import main import popup import selectableGrid import", "\"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None):", "groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else \"Me\"]) self.values", "import main import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen,", "is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g", "pyperclip import time import createVm import main import popup import selectableGrid import virtualMachine", "refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not", "import time import createVm import main import popup import selectableGrid import virtualMachine class", "*args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def", "main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias']", "*args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"] def on_selection(line): popup.editImage(self.form, line)", "import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles", "name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not None", "npyscreen import pyperclip import time import createVm import main import popup import selectableGrid", "popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen,", "= list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias'", "import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords):", "\"Type\", 
\"Owner\"] def on_selection(line): popup.editImage(self.form, line) self.on_selection = on_selection def refresh(self, name_filter=None): filter", "= on_selection def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' : [name_filter]}} if", "selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords)", "not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in", "groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'],", "__init__(self, screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\",", ": [name_filter]}} if name_filter is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values", "filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not None else", "{'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not None else {} groups", "values = list() for g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if", "import npyscreen import pyperclip import time import createVm import main import popup import", "line) self.on_selection = on_selection def refresh(self, name_filter=None): filter = {'Filters' : {'ImageNames' :", "main import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def __init__(self, screen, *args,", "= {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not None else {}", "{'ImageNames' : [name_filter]}} if name_filter is not None else {} groups = main.GATEWAY.ReadImages(**filter)['Images']", "else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups: values.append([g['ImageName'],", "screen, *args, **keywords): super().__init__(screen, *args, **keywords) self.col_titles = [\"Name\", \"Id\", \"Description\", \"Type\", \"Owner\"]", "g in groups: values.append([g['ImageName'], g['ImageId'], g['Description'], g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else", "None else {} groups = main.GATEWAY.ReadImages(**filter)['Images'] values = list() for g in groups:", "import createVm import main import popup import selectableGrid import virtualMachine class ImageGrid(selectableGrid.SelectableGrid): def" ]
[ "<filename>src/04_Mokaro/register_new_user.py import unittest from selenium import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase):", "= driver.find_element_by_id('firstname') middle_name = driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname') email_address = driver.find_element_by_id('email_address') password =", "setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver') driver = self.driver driver.implicitly_wait(10) driver.maximize_window() driver.get('http://demo-store.seleniumacademy.com/customer/account/create') def test_new_user(self): driver", "= driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button =", "and confirm_password.is_enabled() and news_letter_subscription.is_enabled() and submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click()", "first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def tearDown(self): self.driver.implicitly_wait(5) self.driver.close() if __name__", "= driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and", "= driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname') email_address = driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password =", "self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and last_name.is_enabled() and email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled() and news_letter_subscription.is_enabled()", "import unittest from selenium import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase): def", "import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver')", "selenium import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver =", "Customer Account', driver.title) first_name = driver.find_element_by_id('firstname') middle_name = driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname') email_address", "driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname') email_address = driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation')", "= driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and 
last_name.is_enabled() and email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled()", "and news_letter_subscription.is_enabled() and submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def tearDown(self):", "news_letter_subscription.is_enabled() and submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def tearDown(self): self.driver.implicitly_wait(5)", "news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and last_name.is_enabled() and email_address.is_enabled()", "class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver') driver = self.driver driver.implicitly_wait(10) driver.maximize_window() driver.get('http://demo-store.seleniumacademy.com/customer/account/create')", "email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled() and news_letter_subscription.is_enabled() and submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address)", "self.driver driver.implicitly_wait(10) driver.maximize_window() driver.get('http://demo-store.seleniumacademy.com/customer/account/create') def test_new_user(self): driver = self.driver self.assertEqual('Create New Customer Account',", "driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and middle_name.is_enabled()", "submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def tearDown(self): self.driver.implicitly_wait(5) self.driver.close() if", "ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver') driver = self.driver driver.implicitly_wait(10) driver.maximize_window()", "last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def tearDown(self): self.driver.implicitly_wait(5) self.driver.close() if __name__ == '__main__':", "last_name = driver.find_element_by_id('lastname') email_address = driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') 
news_letter_subscription", "middle_name.is_enabled() and last_name.is_enabled() and email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled() and news_letter_subscription.is_enabled() and submit_button.is_enabled())", "= driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and last_name.is_enabled()", "import ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver') driver = self.driver driver.implicitly_wait(10)", "email_address = driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button", "driver.implicitly_wait(10) driver.maximize_window() driver.get('http://demo-store.seleniumacademy.com/customer/account/create') def test_new_user(self): driver = self.driver self.assertEqual('Create New Customer Account', driver.title)", "driver.find_element_by_id('email_address') password = driver.find_element_by_id('password') confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span')", "def setUp(self): self.driver = webdriver.Chrome(executable_path='./../chromedriver') driver = self.driver driver.implicitly_wait(10) driver.maximize_window() driver.get('http://demo-store.seleniumacademy.com/customer/account/create') def test_new_user(self):", "New Customer Account', driver.title) first_name = driver.find_element_by_id('firstname') middle_name = driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname')", "from selenium import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self): self.driver", "confirm_password = driver.find_element_by_id('confirmation') news_letter_subscription = driver.find_element_by_id('is_subscribed') submit_button = driver.find_element_by_xpath('//*[@id=\"form-validate\"]/div[2]/button/span/span') self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and", "Account', driver.title) first_name = driver.find_element_by_id('firstname') middle_name = driver.find_element_by_id('middlename') last_name = driver.find_element_by_id('lastname') email_address =", "test_new_user(self): driver = self.driver self.assertEqual('Create New Customer Account', driver.title) first_name = driver.find_element_by_id('firstname') middle_name", "unittest from selenium import webdriver from api_data_mock import ApiDataMock class RegisterNewUser(unittest.TestCase): def setUp(self):", "confirm_password.is_enabled() and news_letter_subscription.is_enabled() and submit_button.is_enabled()) first_name.send_keys(ApiDataMock.first_name) middle_name.send_keys(ApiDataMock.middle_name) last_name.send_keys(ApiDataMock.last_name) email_address.send_keys(ApiDataMock.email_address) password.send_keys(ApiDataMock.password) confirm_password.send_keys(<PASSWORD>) submit_button.click() def", "and last_name.is_enabled() and email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled() and news_letter_subscription.is_enabled() 
import unittest
from selenium import webdriver
from api_data_mock import ApiDataMock


class RegisterNewUser(unittest.TestCase):
    def setUp(self):
        self.driver = webdriver.Chrome(executable_path='./../chromedriver')
        driver = self.driver
        driver.implicitly_wait(10)
        driver.maximize_window()
        driver.get('http://demo-store.seleniumacademy.com/customer/account/create')

    def test_new_user(self):
        driver = self.driver
        self.assertEqual('Create New Customer Account', driver.title)
        first_name = driver.find_element_by_id('firstname')
        middle_name = driver.find_element_by_id('middlename')
        last_name = driver.find_element_by_id('lastname')
        email_address = driver.find_element_by_id('email_address')
        password = driver.find_element_by_id('password')
        confirm_password = driver.find_element_by_id('confirmation')
        news_letter_subscription = driver.find_element_by_id('is_subscribed')
        submit_button = driver.find_element_by_xpath('//*[@id="form-validate"]/div[2]/button/span/span')
        self.assertTrue(first_name.is_enabled() and middle_name.is_enabled() and last_name.is_enabled()
                        and email_address.is_enabled() and password.is_enabled() and confirm_password.is_enabled()
                        and news_letter_subscription.is_enabled() and submit_button.is_enabled())
        first_name.send_keys(ApiDataMock.first_name)
        middle_name.send_keys(ApiDataMock.middle_name)
        last_name.send_keys(ApiDataMock.last_name)
        email_address.send_keys(ApiDataMock.email_address)
        password.send_keys(ApiDataMock.password)
        confirm_password.send_keys(<PASSWORD>)
        submit_button.click()

    def tearDown(self):
        self.driver.implicitly_wait(5)
        self.driver.close()


if __name__ == '__main__':
    unittest.main(verbosity=2)
[ "10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space", "observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df =", "\"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main = None", "#state = [] #for obs in observation: obs = observation.flatten() if self.use_scalar: obs", "self.df = observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main =", "state_dim_reduction(self, observation): #state = [] #for obs in observation: obs = observation.flatten() if", "self.latent_space = latent_space self.df = None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting", "observation): #state = [] #for obs in observation: obs = observation.flatten() if self.use_scalar:", "<gh_stars>1-10 import torch import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition", "self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar =", "self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on", "#state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float, device=device) def get_pca_dimension_info(self): return", "import torch import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import", "import IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression:", "self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df = None def", "= self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df =", "if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames = []", "= None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming", "the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations", "= PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space = latent_space", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space):", "get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size)", "= self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def", "scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting statistics PCA...\")", "= self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return", "space 
{self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for obs in observation:", "if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else:", "if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size =", "[] self.pca_main = None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler", "None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the", "PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df", "= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames", "self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space =", "= scalar self.latent_space = latent_space self.df = None def create_pca(self, observations, get_statistics): if", "torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main", "batch_size = self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self,", "obs = observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0])", "def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on latent", "if self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0])", "import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as", "None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar", "self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return", "= [] #for obs in observation: obs = observation.flatten() if self.use_scalar: obs =", "scalar, latent_space): self.fileNames = [] self.pca_main = None self.batch_size = 10000 self.pcaStatistic =", "np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device =", "StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df = None def create_pca(self, observations,", "class PCACompression: def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main = None self.batch_size", "statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final", "PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def", "in observation: obs = observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs =", "= [] self.pca_main = None 
self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size)", "scalar self.latent_space = latent_space self.df = None def create_pca(self, observations, get_statistics): if self.use_scalar:", "else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float,", "observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state =", "latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for obs in", "print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state =", "self.df = self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df)", "__init__(self, scalar, latent_space): self.fileNames = [] self.pca_main = None self.batch_size = 10000 self.pcaStatistic", "IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def", "sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\" if", "def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\")", "torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames =", "self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df", "self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for obs in observation: obs =", "as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self,", "self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df = None", "self.df = None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations)", "self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self):", "sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class", "= self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation):", "self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations if get_statistics:", "= observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state", "the scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting statistics", "latent_space self.df = None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\")", "#for obs in observation: obs = observation.flatten() if self.use_scalar: obs = 
self.scaler.transform([obs]) else:", "PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA", "as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device", "self.pca_main = None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler =", "self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state", "StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else", "print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting", "update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on latent space", "torch import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA", "numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA", "state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float, device=device) def get_pca_dimension_info(self): return np.cumsum(self.pcaStatistic.explained_variance_ratio_)", "else \"cpu\") class PCACompression: def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main =", "on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for obs", "obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float, device=device)", "PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for", "[] #for obs in observation: obs = observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs])", "= PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df)", "final PCA on latent space {self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = []", "get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations)", "print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df =", "= StandardScaler() self.use_scalar = scalar self.latent_space = latent_space self.df = None def create_pca(self,", "else: self.df = observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main", "obs in observation: obs = observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs", "import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available()", "PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class PCACompression: def __init__(self, scalar,", 
"scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations if", "{self.latent_space}') self.pca_main.fit(self.df) def state_dim_reduction(self, observation): #state = [] #for obs in observation: obs", "obs = self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state", "latent_space): self.fileNames = [] self.pca_main = None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size", "= 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler() self.use_scalar = scalar", "= [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float, device=device) def", "= latent_space self.df = None def create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the", "[obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state, dtype=torch.float, device=device) def get_pca_dimension_info(self):", "create_pca(self, observations, get_statistics): if self.use_scalar: print(\"Fitting the scalar...\") self.scaler.fit(observations) print(\"Transforming the scalar...\") self.df", "self.fileNames = [] self.pca_main = None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size =", "def state_dim_reduction(self, observation): #state = [] #for obs in observation: obs = observation.flatten()", "observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space, batch_size", "from sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "= None self.batch_size = 10000 self.pcaStatistic = PCA(batch_size = self.batch_size) self.scaler = StandardScaler()", "observation: obs = observation.flatten() if self.use_scalar: obs = self.scaler.transform([obs]) else: obs = [obs]", "= observations if get_statistics: print(\"Fitting statistics PCA...\") self.pcaStatistic.fit(self.df) def update_pca(self): self.pca_main = PCA(n_components=self.latent_space,", "from sklearn.preprocessing import StandardScaler from sklearn.decomposition import IncrementalPCA as PCA device = torch.device(\"cuda\"", "self.scaler.transform([obs]) else: obs = [obs] #state.append(self.pca_main.transform(obs)[0]) state = np.array(self.pca_main.transform(obs)[0]) return state #return torch.tensor(state,", "self.use_scalar = scalar self.latent_space = latent_space self.df = None def create_pca(self, observations, get_statistics):", "PCACompression: def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main = None self.batch_size =", "print(\"Transforming the scalar...\") self.df = self.scaler.transform(observations) else: self.df = observations if get_statistics: print(\"Fitting", "self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size) print(f'Fitting final PCA on latent space {self.latent_space}')", "def __init__(self, scalar, latent_space): self.fileNames = [] self.pca_main = None self.batch_size = 10000" ]
[ "def get_next(self): return self.next def get_previous(self): return self.previous def set_data(self, new_data): self.data =", "__init__(self, init_data): self.data = init_data self.next = None self.previous = None def get_data(self):", "def __init__(self, init_data): self.data = init_data self.next = None self.previous = None def", "get_data(self): return self.data def get_next(self): return self.next def get_previous(self): return self.previous def set_data(self,", "self.next def get_previous(self): return self.previous def set_data(self, new_data): self.data = new_data def set_next(self,", "def get_previous(self): return self.previous def set_data(self, new_data): self.data = new_data def set_next(self, new_next):", "None def get_data(self): return self.data def get_next(self): return self.next def get_previous(self): return self.previous", "= None self.previous = None def get_data(self): return self.data def get_next(self): return self.next", "def set_data(self, new_data): self.data = new_data def set_next(self, new_next): self.next = new_next def", "get_next(self): return self.next def get_previous(self): return self.previous def set_data(self, new_data): self.data = new_data", "init_data): self.data = init_data self.next = None self.previous = None def get_data(self): return", "= None def get_data(self): return self.data def get_next(self): return self.next def get_previous(self): return", "self.previous = None def get_data(self): return self.data def get_next(self): return self.next def get_previous(self):", "set_data(self, new_data): self.data = new_data def set_next(self, new_next): self.next = new_next def set_previous(self,", "init_data self.next = None self.previous = None def get_data(self): return self.data def get_next(self):", "return self.data def get_next(self): return self.next def get_previous(self): return self.previous def set_data(self, new_data):", "return self.next def get_previous(self): return self.previous def set_data(self, new_data): self.data = new_data def", "self.data def get_next(self): return self.next def get_previous(self): return self.previous def set_data(self, new_data): self.data", "class DLNode: def __init__(self, init_data): self.data = init_data self.next = None self.previous =", "new_data): self.data = new_data def set_next(self, new_next): self.next = new_next def set_previous(self, new_previous):", "self.data = new_data def set_next(self, new_next): self.next = new_next def set_previous(self, new_previous): self.previous", "None self.previous = None def get_data(self): return self.data def get_next(self): return self.next def", "DLNode: def __init__(self, init_data): self.data = init_data self.next = None self.previous = None", "get_previous(self): return self.previous def set_data(self, new_data): self.data = new_data def set_next(self, new_next): self.next", "return self.previous def set_data(self, new_data): self.data = new_data def set_next(self, new_next): self.next =", "self.previous def set_data(self, new_data): self.data = new_data def set_next(self, new_next): self.next = new_next", "self.data = init_data self.next = None self.previous = None def get_data(self): return self.data", "= new_data def set_next(self, new_next): self.next = new_next def set_previous(self, new_previous): self.previous =", "new_data def set_next(self, new_next): self.next = new_next def set_previous(self, new_previous): self.previous = new_previous", "= init_data self.next = None self.previous = None def get_data(self): return self.data def", "def 
get_data(self): return self.data def get_next(self): return self.next def get_previous(self): return self.previous def", "self.next = None self.previous = None def get_data(self): return self.data def get_next(self): return" ]
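A short usage sketch, not from the original source, showing how two of the doubly linked list nodes above can be wired together; the variable names are illustrative only.

# Hypothetical usage: link two DLNode instances in both directions.
head = DLNode('a')
tail = DLNode('b')
head.set_next(tail)        # head -> tail
tail.set_previous(head)    # head <- tail

assert head.get_next().get_data() == 'b'
assert tail.get_previous().get_data() == 'a'
assert head.get_previous() is None and tail.get_next() is None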
[ "not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax", "expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title,", "ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0] for med", "beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select", "columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width = 1000 #shannon index", "bp = ax.boxplot(richness) for val in richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x,", "bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas =", "= Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data)", "= sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the", "working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig", "Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label],", "seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids =", "fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def", "samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111)", "fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig,", "['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame =", "absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True)", "= Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666,", "self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot", "the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = 
('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels =", "for x in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1,", "resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"),", "data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add", "column=0) def create_window(self): \"\"\" creates a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\",", "+ tax_level + ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space =", "working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon", "absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 =", "sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax", "c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12)", "self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0)", "based on Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition import clr from", "skbio.stats.composition import clr from skbio.stats.composition import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples()", "import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 =", "fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups':", "self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index)", "# ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14,", "ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame)", "pca: self.top.title('PCA - Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA - Principal", "list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0,", "# ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) 
ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95,", "(median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity", "fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity',", "assume equal variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res", "window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1)", "self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0,", "from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import pandas as pd import", "#add median text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels = [str(np.round(med,", "cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False)", "20 width=600 if len(samples_list)> 20: width = 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level)", "ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians = [med.get_ydata()[0] for", "enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95,", "diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list,", "y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: #", "in medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res =", "20 width=600 if len(samples_list)> 20: width = 1000 #shannon index (alpha diversity) if", "ax.boxplot(shannon0) for val, in zip(shannon0): x = x = np.random.normal(1, 0.04, 1) ax.scatter(x,", "x in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2,", "Principal Coordinate Analysis') method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111)", "saves a figure in high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG", "def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview", "self.top.title('PCA - Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA - Principal Coordinate", "command=lambda 
fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list,", "len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples =", "'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method = 'PCoA' fig = Figure(figsize=(6,6),", "tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for val in richness: x =", "for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120)", "to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\")", "if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data =", "from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)),", "yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png')", "marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax = fig.add_subplot(212) for i,val in", "8}) self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import", "filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def", "the y-axis labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition import multiplicative_replacement import seaborn", "is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1", "= fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0): x = x =", "initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\"", "ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3)", "sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig =", "matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)", "Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666)", "absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in 
list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for", "all samples on ' + tax_level + ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1,", "filename #def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2,", "x = x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon", "dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45,", "shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data =", "import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 =", "median_labels = [str(np.round(med, 2)) for med in medians] #t-test (Wlech's-test does not assume", "sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True)", "scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val:", "richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians", "import shannon from .general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,", "= pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp =", "tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6),", "bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] #t-test (Wlech's-test does not", "Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for val in richness:", "columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title,", "index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df,", "diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples,", "absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in sample_names:", "ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels", "canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness',", "filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as", 
"from skbio.diversity.alpha import shannon from .general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg", "defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename", "self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set()", "np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' +", "y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label,", "absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211)", "Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button =", "import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import", "index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df", "else: shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names)", ".general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure", "'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue',", "fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2)", "(alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int')", "ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians = [med.get_ydata()[0]", "list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0,", "equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2)", "= multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df,", "heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with", 
"files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self,", "= Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH,", "not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax", "pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values]", "Shannon index of all samples on ' + tax_level + ' level') self.inner_frame", "metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap',", "diversity') self.top.title('overview of Shannon index of all samples on ' + tax_level +", "index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2,", "data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12)", "' + tax_level + ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space", "tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of", "(\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples,", "for med in medians] #t-test (Wlech's-test does not assume equal variance) from scipy.stats", "Analysis') method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1,", "expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig,", "= self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids)", "= self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else:", "tkinter import * from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import pandas", "diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0,", "ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3)", "command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def 
shannon_diversity_groups(self, working_samples,", "#ax.set_ylabel('number of species') ax = fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8,", "* from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import pandas as pd", "in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 =", "def __init__(self, root, abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels =", "= Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button", "tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from .general_functions import * import matplotlib", "text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels = [str(np.round(med, 2)) for", "top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig,", "y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x", "canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high", "absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 =", "ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g =", "top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw()", "med in medians] #t-test (Wlech's-test does not assume equal variance) from scipy.stats import", "= fig.add_subplot(211) bp = ax.boxplot(richness) for val in richness: x = np.random.normal(1, 0.04,", "shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon", "'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a", "- Principal Coordinate Analysis') method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax =", "right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas", "self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_'", "canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness',", "absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = 
absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon)", "richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp =", "'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3',", "file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt", "ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08,", "= Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for val in", "ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3,", "sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data", "canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups':", "multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int')", "\"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF", "method = 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method = 'PCoA' fig", "hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame)", "ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12)", "open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot as plt", "plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window()", "shannon from .general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk", "create_window(self): \"\"\" creates a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\",", "= fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity", "working_samples, sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity')", "self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), 
dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values]", "rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame =", "= 20 width=600 if len(samples_list)> 20: width = 1000 #shannon index (alpha diversity)", "= fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of", "robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename", "self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0", "is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else:", "richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2,", "cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap based on Aitchison distance", "rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0] for med in", "title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples,", "richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of richness of all samples on", "working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap based on Aitchison distance and", "else: self.top.title('PCoA - Principal Coordinate Analysis') method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120)", "ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness):", "fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index')", "= root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME =", "rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame,", "= fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness',", "of all samples on ' + tax_level + ' level') self.inner_frame = Frame(self.frame)", "of ' + tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index,", "y_labels = 
list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels]))", "width=600 if len(samples_list)> 20: width = 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if", "pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA", "6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap',", "from skbio.diversity import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None:", "sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111)", "bp = ax.boxplot(shannon0) for val, in zip(shannon0): x = x = np.random.normal(1, 0.04,", "#fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas", "ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12)", "title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup window \"\"\" self.top", "pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0)", "filename = self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename", "c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA')", "self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None):", "\"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is", "all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400", "shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp", "'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\"", "samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data", "fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level):", "cluster heatmap based on Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition import", "expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title,", 
"(\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self,", "if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 =", "Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label],", "Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df,", "= ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0]", "Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0)", "import * from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import pandas as", "= ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2,", "= Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1,", "Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4)", "index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the beta diversity", "columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\",", "if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 =", "' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600 if", "#self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\" destroys/closes pop up windows \"\"\" self.top.destroy()", "pca=False): self.create_window() if pca: self.top.title('PCA - Principal Component Analysis') method = 'PCA' else:", "asksaveasfilename import pandas as pd import numpy as np import tkinter.messagebox as tmb", "= [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median", "data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data,", "sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if", "f.write('\\n'.join([x.strip('_') for x in 
y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2,", "def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window()", "Coordinate Analysis') method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1,", "title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self,", "Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)", "matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from", "shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211)", "fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame", "self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0", "ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98,", "shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data", "= list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr =", "'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2,", "self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0,", "annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap',", "self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0,", "fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas =", "samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax", "return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): 
self.create_window() self.top.title('Richness') self.top.title('overview of richness", "bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] from scipy.stats import ttest_ind", "canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon", "beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn as", "= FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\",", "marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212) for i,val", "= self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else:", "richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness')", "1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax =", "0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level)", "FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda", "import os, sys from tkinter import * from tkinter.ttk import * from tkinter.filedialog", "= ax.boxplot(shannon0) for val, in zip(shannon0): x = x = np.random.normal(1, 0.04, 1)", "= asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename,", "else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp", "creates a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\",", "tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples')", "Analysis') method = 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method = 'PCoA'", "sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0", "skbio.diversity.alpha import shannon from .general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import", "plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap based", "weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1)", 
"multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\",", "import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids", "data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df", "'+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame)", "matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button", "in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1,", "dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if", "in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2,", "None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in", "heatmap based on Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition import clr", "for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix()))", "pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA -", "matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self,", "columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width = 1000 start_idx =", "plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap based on", "wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH,", "wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM,", "text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return", "samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index", 
"save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup window \"\"\" self.top = Toplevel(self.root)", "#t-test (Wlech's-test does not assume equal variance) from scipy.stats import ttest_ind ttest_result =", "pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA - Principal Component Analysis') method = 'PCA'", "self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top)", "command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res)", "#fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas", "Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0)", "Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0):", "def create_window(self): \"\"\" creates a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel)", "matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1,", "= 20 width=600 if len(samples_list)> 20: width = 1000 start_idx = len(self.all_tax_levels) -", "matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig,", "'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2',", "robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save the cluster", "self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup window \"\"\"", "index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples =", "= abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy',", "beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int')", "not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm =", "i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level)", "matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM,", "right=0.98, bottom=0.1, 
top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig,", "working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn as sns", "to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels", "for val in richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.',", "root, abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT", "= Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1,", "column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig,", "initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label):", "fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of '", "ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame", "top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig,", "list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids,", "for val, in zip(shannon0): x = x = np.random.normal(1, 0.04, 1) ax.scatter(x, val,", "the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self,", "('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_')", "diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0", "right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas =", "(median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of richness of", "(high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self,", "figure in high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS", "1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is 
not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples()", "y-axis labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition import multiplicative_replacement import seaborn as", "'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4',", "save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels", "= Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame", "= np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of '", "len(samples_list)> 20: width = 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is", "self.top.title('overview of richness of all samples on ' + tax_level + ' level')", "fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame)", "ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import", "\"\"\" saves a figure in high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension,", "val, in zip(shannon0): x = x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey',", "import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform", "matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig,", "\"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples()", "fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12)", "Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method", "self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4',", "matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10})", "self.root = root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME", "'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save", "canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity',", "matplotlib_frame.grid(row=2, column=0) canvas = 
FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame,", "data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids),", "sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\":", "as tmb from skbio.diversity.alpha import shannon from .general_functions import * import matplotlib matplotlib.use('TkAgg')", "column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save", "fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA')", "top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame)", "title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from", "text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def", "from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance", "def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of richness of all samples", "pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1)", "index of all samples on ' + tax_level + ' level') self.inner_frame =", "variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat:", "in medians] #t-test (Wlech's-test does not assume equal variance) from scipy.stats import ttest_ind", "initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup window \"\"\" self.top =", "0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax", "= ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f:", "sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self,", "weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\" destroys/closes pop", "import ttest_ind ttest_result = 
ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))]", "'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3',", "right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas", "400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4',", "richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number", "ax = fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number", "width=600 if len(samples_list)> 20: width = 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples()", "title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\"", "pd import numpy as np import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon", "import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self, root,", "files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, samples_list,", "column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of", "+ tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0,", "columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap',", "clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\":", "= sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file", "np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species')", "self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width = 1000", "Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label)", "samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\"", "sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the beta", 
"self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_'", "'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as", "squareform class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df, all_tax_levels): self.root = root self.abundance_df =", "= sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True,", "annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png')", "not assume equal variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False)", "self.create_window() self.top.title('Richness') self.top.title('overview of richness of all samples on ' + tax_level +", "self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist()", "y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1),", "is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for", "x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number", "= Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile))", "ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97,", "shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample", "from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all", "bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8})", "ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0] for", "'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile,", "Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1,", "ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) 
#ax.set_ylabel('number of species') ax = fig.add_subplot(212)", "skbio.stats.composition import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0", "ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2,", "samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA - Principal Component Analysis') method =", "samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of richness of all samples on ' +", "= absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax =", "ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax = fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.')", "Component Analysis') method = 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method =", "width = 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples", "import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all samples on", "as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False):", "bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians", "'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2,", "column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button =", "height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\" destroys/closes pop up windows \"\"\"", "does not assume equal variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values,", "* import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure", "working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for", "zip(shannon0): x = x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4)", "heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level):", "label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best',", "save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile))", "from skbio.stats.composition import clr from skbio.stats.composition import 
multiplicative_replacement import seaborn as sns if", "None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist())", "import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from .general_functions import * import", "'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig,", "clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g", "import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))]", "of species') ax = fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical')", "#else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1,", "self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp", "' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2,", "shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig", "equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2)", "species') ax = fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples')", "Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0)", "on Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition", "initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2,", "= ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2,", "ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0): x = x", "in zip(shannon0): x = x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.',", "as np import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from .general_functions import", "#save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') 
as f: f.write('\\n'.join([x.strip('_') for", "initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list,", "is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for", "scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas", "fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names,", "self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name)", "= Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in", "median text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels = [str(np.round(med, 2))", "defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves", "= Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1,", "bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig,", "import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size':", "list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T,", "FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform class", "(\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, samples_list, tax_level):", "#shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples", "command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def", "samples_list, tax_level): \"\"\" saves a cluster heatmap based on Aitchison distance and the", "= 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis') method = 'PCoA' fig =", "canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples':", "fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda 
fig=fig, title='Shannon diversity', initialfile='shannon_all_samples':", "from tkinter.filedialog import asksaveasfilename import pandas as pd import numpy as np import", "'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt',", "initialfile, defaultextension='.png'): \"\"\" saves a figure in high resolution \"\"\" filename = asksaveasfilename(title=title,", "#self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\" destroys/closes pop up windows", "10}) from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df, all_tax_levels): self.root", "f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self,", "a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0)", "[str(np.round(med, 2)) for med in medians] #t-test (Wlech's-test does not assume equal variance)", "fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2)", "ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True)", "1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax", "self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1])", "root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue',", "import pandas as pd import numpy as np import tkinter.messagebox as tmb from", "for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1,", "= absolut_working_samples[sample_names].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = []", "(Wlech's-test does not assume equal variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values,", "popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0,", "save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def", "method = 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen',", "label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1),", "self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return 
(median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level):", "self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all samples on ' + tax_level", "'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3',", "list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot", "= fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon", "matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig,", "(high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self):", "ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95,", "= ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians =", "for med in bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] from", "of richness of all samples on ' + tax_level + ' level') self.inner_frame", "c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax = fig.add_subplot(212) for i,val", "import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label,", "- Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA - Principal Coordinate Analysis')", "ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2,", "canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity',", "richness, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax =", "text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0)", "FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda", "resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels,", "fig = Figure(figsize=(6,6), dpi=120) ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2,", "matplotlib.pyplot as plt 
plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums,", "fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for val", "dpi=600) return filename #def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level,", "shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0)", "tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label): \"\"\" \"\"\"", "numpy as np import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from .general_functions", "top_space = 20 width=600 if len(samples_list)> 20: width = 1000 #shannon index (alpha", "samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not", "Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition import", "medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat:", "import numpy as np import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from", "scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val:", "canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high", "data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\":", "alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212) for i,val in", "if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 =", "rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians = [med.get_ydata()[0] for med", "dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45,", "defaultextension='.png'): \"\"\" saves a figure in high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile,", "'+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0)", "distance and the y-axis labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition import multiplicative_replacement", "#g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\",", "shannon1, samples1_label, samples2_label): \"\"\" \"\"\" 
self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None:", "absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6),", "val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212)", "title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names, tax_level,", "title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window()", "clr from skbio.stats.composition import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not", "blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self,", "\"\"\" \"\"\" self.create_window() self.top.title('Richness') fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data =", "len(samples_list)> 20: width = 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not", "os, sys from tkinter import * from tkinter.ttk import * from tkinter.filedialog import", "skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all samples", "save_button.grid(row=1, column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from", "absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in samples_list:", "absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2]", "diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int') shannon0", "ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98,", "ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12)", "self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4',", "self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import", "save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': 
self.save_high_resolution_figure(fig, title,", "mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6})", "= clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True,", "#def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness,", "save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title,", "shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text", "= ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame", "def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window()", "#if pca: # ax.set_title('PCA') #else: # ax.set_title('PCoA') ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12) ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False,", "weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\"", "labels\"\"\" from skbio.stats.composition import clr from skbio.stats.composition import multiplicative_replacement import seaborn as sns", "median_labels = [str(np.round(med, 2)) for med in medians] from scipy.stats import ttest_ind ttest_result", "skbio.diversity import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0", "= beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g,", "from .general_functions import * import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from", "bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians =", "data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0,", "\"\"\" creates a popup window \"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1)", "filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\")))", "matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster", "= FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, 
text=\"Save (high resolution)\",", "if len(samples_list)> 20: width = 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is", "all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen',", "in richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels([''])", "absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon)", "= Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp = ax.boxplot(data)", "samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax =", "= clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids)", "sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to", "text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def", "plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if", "text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0)", "diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1,", "working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label):", "resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res)", "columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save", "tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2,", "[] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6),", "tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples()", "= FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\",", "ax = fig.add_subplot(111) ax.scatter(x=pco1_group1, 
y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca:", "samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA - Principal Component Analysis') method", "column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity')", "from tkinter import * from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import", "import asksaveasfilename import pandas as pd import numpy as np import tkinter.messagebox as", "equal variance) from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res =", "initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window()", "= pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g", "np import tkinter.messagebox as tmb from skbio.diversity.alpha import shannon from .general_functions import *", "in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1,", "shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig", "ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2) matplotlib_frame =", "= [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig =", "for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix()))", "with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot as", "ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1,", "expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig,", "self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20:", "fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig,", "asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600)", "= all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2',", "files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, 
samples_list, tax_level): def richness_groups(self, working_samples,", "column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width = 1000 #shannon", "a cluster heatmap based on Aitchison distance and the y-axis labels\"\"\" from skbio.stats.composition", "med in bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] #t-test (Wlech's-test", "tkinter.ttk import * from tkinter.filedialog import asksaveasfilename import pandas as pd import numpy", "abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT =", "= Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width", "(high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return", "= self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df =", "def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure in high resolution", "* from tkinter.filedialog import asksaveasfilename import pandas as pd import numpy as np", "6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g,", "- list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int')", "columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0, columnspan=4) save_button = Button(self.inner_frame, text=\"Save", "self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T)", "= list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df =", "high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG", "index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for", "matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import", "title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon", "ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview of richness of all", "scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df, all_tax_levels): self.root = root", "ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness): 
ax.scatter(richness.index[i],val,marker='.')", "as sns if self.abundance_df.groupAbsoluteSamples() is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns)", "ax.boxplot(richness) for val in richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey',", "matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button", "= fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: #", "from matplotlib.figure import Figure matplotlib.rcParams.update({'font.size': 10}) from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def", "self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2, shannon1,", "= 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples", "initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list,", "= 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples =", "cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename = ('.').join(filename.split('.')[:-1]) #save y-axis labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index)", "fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a", "'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def", "None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in", "pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA - Principal", "ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label) ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label) #if pca: # ax.set_title('PCA') #else:", "save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness') self.top.title('overview", "self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples =", "medians = [med.get_ydata()[0] for med in bp['medians']] median_labels = [str(np.round(med, 2)) for med", "1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples =", "title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self, working_samples, 
sample_names, tax_level, samples1, samples2, shannon1, samples1_label,", "med in medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res", "'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4']", "samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples =", "ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97,", "fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val,", "initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"), (\"EPS files\",\"*.eps\"), (\"JPEG files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return", "= absolut_working_samples[samples_list].astype('int') shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = []", "in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax =", "alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax = fig.add_subplot(212) for i,val in enumerate(shannon0):", "'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\")", "class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df", "save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile))", "wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw()", "return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import", "data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file", "= Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile))", "fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0): x = x = np.random.normal(1,", "diversity', fontsize=12) #add median text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels", "20: width = 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not", "shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2,", "= 'PCoA' fig = Figure(figsize=(6,6), dpi=120) ax = 
fig.add_subplot(111) ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label)", "'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\"", "metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save", "pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca: self.top.title('PCA - Principal Component", "as pd import numpy as np import tkinter.messagebox as tmb from skbio.diversity.alpha import", "labels y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in", "index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp", "in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 =", "self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen',", "def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap based on Aitchison", "fontsize=12) ax.set_ylabel('richness', fontsize=12) #add median text medians = [med.get_ydata()[0] for med in bp['medians']]", "mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g =", "[str(np.round(med, 2)) for med in medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values,", "[] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6),", "self.create_window() if pca: self.top.title('PCA - Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA", "sys from tkinter import * from tkinter.ttk import * from tkinter.filedialog import asksaveasfilename", "[shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median", "initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha", "= [shannon0[samples1].values, shannon0[samples2].values] bp = ax.boxplot(data) ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add", "beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png') import matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples,", "richness of all samples on ' + tax_level + ' level') self.inner_frame =", "'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure in", "canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon", "(high resolution)\", 
command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def", "import clr from skbio.stats.composition import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is", "save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile))", "FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame, text=\"Save (high resolution)\", command=lambda", "resolution)\", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\"", "width = 1000 start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None:", "dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(richness) for val in richness: x", "is not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm", "in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax", "20: width = 1000 #shannon index (alpha diversity) if self.abundance_df.groupAbsoluteSamples() is not None:", "from scipy.spatial.distance import squareform class PopUpIncludingMatplotlib(): def __init__(self, root, abundance_df, all_tax_levels): self.root =", "'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4',", "ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))] #fig.subplots_adjust(left=0.1, right=0.98,", "fontsize=12) #add median text medians = [med.get_ydata()[0] for med in bp['medians']] median_labels =", "weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666,", "g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to save", "fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values, shannon0[samples2].values] bp =", "dpi=120)#, tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0): x", "+ tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical')", "column=0) return (median_labels, ttest_res) def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity", "hspace=0.2, wspace=0.2) fig.set_tight_layout(True) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw()", "i,val in 
enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98,", "' + tax_level) ax = fig.add_subplot(212) for i,val in enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8,", "val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity']) #ax.set_ylabel('number of species') ax = fig.add_subplot(212) for", "= self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig", "column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def", "= absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#,", "resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_groups(self,", "fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names,", "= absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in", "canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title=method,", "fontsize=12) ax.set_ylabel('Shannon diversity', fontsize=12) #add median text medians = [med.get_ydata()[0] for med in", "mr_clr = clr(mr_df) mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\",", "samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is", "samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples", "command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates", "self.top.title('overview of Shannon index of all samples on ' + tax_level + '", "__init__(self, root, abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels = all_tax_levels", "\"\"\" saves a cluster heatmap based on Aitchison distance and the y-axis labels\"\"\"", "annot_kws={\"size\": 6}) g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename =", "save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() 
self.top.title('Shannon", "rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button = Button(self.inner_frame,", "= list(data0.iloc[g.dendrogram_row.reordered_ind].index) with open(filename+'_yaxis_labels.txt', 'w') as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import", "hspace=0.2, wspace=0.2) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame)", "tax_level, samples1, samples2, shannon1, samples1_label, samples2_label): \"\"\" \"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples()", "tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0", "'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure in high", "from skbio.stats.composition import multiplicative_replacement import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not None:", "list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness", "return filename #def richness_groups(self, working_samples, samples_list, tax_level): def richness_groups(self, working_samples, sample_names, tax_level, samples1,", "for med in medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False)", "= [med.get_ydata()[0] for med in bp['medians']] median_labels = [str(np.round(med, 2)) for med in", "def beta_diversity_heatmap(self, working_samples, samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn", "richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2] fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True) ax", "tight_layout=True) ax = fig.add_subplot(211) bp = ax.boxplot(shannon0) for val, in zip(shannon0): x =", "column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True) save_button =", "tax in list(working_samples[tax_level])]].apply(shannon) else: shannon0 = [] for sample in sample_names: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0", "fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2,", "resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def shannon_diversity_all_samples(self, working_samples,", "def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False): self.create_window() if pca:", "= pd.Series(shannon0, index=sample_names) fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [shannon0[samples1].values,", "tax_level): \"\"\" saves a cluster heatmap based on Aitchison distance and the y-axis", "import 
matplotlib.pyplot as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a", "hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().pack(side=BOTTOM,", "ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12) ax.legend(loc='best', shadow=False, scatterpoints=1) fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame =", "ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax =", "from scipy.stats import ttest_ind ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False) ttest_res = ['T_stat: '+str(round(ttest_result[0],2)),", "not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness", "None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness = absolute_working_samples.astype(bool).sum(axis=0) else: richness =", "fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self,", "on ' + tax_level + ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4)", "else: shannon0 = [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list)", "bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas =", "= [] for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig =", "self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666) #self.top.maxsize(width=666, height=666) self.top.focus_set() def cancel(self, event=None): \"\"\" destroys/closes", "'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1',", "tax_level + ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20", "list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr = clr(mr_df)", "c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['']) ax.set_ylabel('number of ' + tax_level) ax = fig.add_subplot(212) for", "for sample in samples_list: shannon0.append(shannon_index(working_samples[sample].as_matrix())) shannon0 = pd.Series(shannon0, index=samples_list) fig = Figure(figsize=(4,6), dpi=120)#,", "cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select file to save the", "0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1)", "PopUpIncludingMatplotlib(): def 
__init__(self, root, abundance_df, all_tax_levels): self.root = root self.abundance_df = abundance_df self.all_tax_levels", "samples_list, tax_level): \"\"\" \"\"\" from skbio.diversity import beta_diversity import seaborn as sns if", "self.all_tax_levels = all_tax_levels self.HEIGHT = 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4',", "med in bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] from scipy.stats", "+ ' level') self.inner_frame = Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600", "'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure", "= self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png') filename =", "files\",\"*.jpg\"), (\"TIFF files\",\"*.tiff\"))) fig.savefig(filename, dpi=600) return filename #def richness_groups(self, working_samples, samples_list, tax_level): def", "= data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis',", "column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width = 1000 start_idx", "\"\"\" self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0,", "title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup", "= 400 self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4',", "saves a cluster heatmap based on Aitchison distance and the y-axis labels\"\"\" from", "of Shannon index of all samples on ' + tax_level + ' level')", "\"\"\" self.create_window() self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples", "shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all samples on '", "if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples = absolute_working_samples[samples_list].astype('int') richness =", "1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E)", "None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\",", "self.top = Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1)", "= ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4', 'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3',", "Toplevel(self.root) self.top.protocol(\"WM_DELETE_WINDOW\", self.cancel) self.top.attributes(\"-topmost\", 1) 
self.top.attributes(\"-topmost\", 0) self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame =", "self.top.title('Shannon diversity') if self.abundance_df.groupAbsoluteSamples() is not None: absolut_working_samples = self.abundance_df.groupAbsoluteSamples() absolut_working_samples = absolut_working_samples[sample_names].astype('int')", "\"\"\" from skbio.diversity import beta_diversity import seaborn as sns if self.abundance_df.groupAbsoluteSamples() is not", "ids = list(data0.columns) index0 = list(data0.index) data1 = clr(data0.transpose().values.tolist()) mr_df = multiplicative_replacement(data0.T) mr_clr", "not None: data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) index0 = list(data0.index) data1 =", "save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure in high resolution \"\"\"", "self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0, weight=1) #self.top.title(self.name) #self.top.minsize(width=666, height=666)", "= list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g = sns.clustermap(pd.DataFrame(bc_dm.data,", "Frame(self.frame) self.inner_frame.grid(row=1, column=0, columnspan=4) top_space = 20 width=600 if len(samples_list)> 20: width =", "self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int') ids = list(data0.columns) data = data0.transpose().values.tolist() bc_dm = beta_diversity(\"braycurtis\", data, ids) g", "2)) for med in medians] from scipy.stats import ttest_ind ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values,", "enumerate(richness): ax.scatter(richness.index[i],val,marker='.') ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('number of ' + tax_level) fig.subplots_adjust(left=0.1, right=0.98,", "as plt plt.close(\"all\") def cluster_heatmap(self, working_samples, samples_list, tax_level): \"\"\" saves a cluster heatmap", "tkinter.filedialog import asksaveasfilename import pandas as pd import numpy as np import tkinter.messagebox", "self.create_window() self.top.title('Shannon diversity') self.top.title('overview of Shannon index of all samples on ' +", "fig, title, initialfile, defaultextension='.png'): \"\"\" saves a figure in high resolution \"\"\" filename", "Button(self.frame, text=\"Save (high resolution)\", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1,", "g = sns.clustermap(mr_clr_df, metric=\"euclidean\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}, yticklabels=False) filename = self.save_high_resolution_figure(g, 'Select", "as f: f.write('\\n'.join([x.strip('_') for x in y_labels])) import matplotlib.pyplot as plt plt.close(\"all\") def", "if pca: self.top.title('PCA - Principal Component Analysis') method = 'PCA' else: self.top.title('PCoA -", "initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level): self.create_window() self.top.title('Richness')", "pd.DataFrame(mr_clr.T, index=index0, columns=ids) #g = sns.clustermap(mr_clr_df, metric=\"correlation\", cmap=\"mako\", robust=True, annot_kws={\"size\": 6}) g =", "= x = np.random.normal(1, 0.04, 1) 
ax.scatter(x, val, c='grey', marker='.', alpha=0.4) ax.set_xticklabels(['Shannon diversity'])", "fig = Figure(figsize=(5,6), dpi=120) ax = fig.add_subplot(111) data = [richness[samples1].values, richness[samples2].values] bp =", "ax = fig.add_subplot(212) for i,val in enumerate(shannon0): ax.scatter(shannon0.index[i],val,marker='.') ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical') ax.set_xlabel('samples') ax.set_ylabel('Shannon", "ids) g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={\"size\": 8}) self.save_high_resolution_figure(g, 'Select file to", "bottom=0.1, top=0.95, hspace=0.4, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2, column=0) canvas = FigureCanvasTkAgg(fig, matplotlib_frame)", "(high resolution)\", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels,", "val in richness: x = np.random.normal(1, 0.04, 1) ax.scatter(x, val, c='grey', marker='.', alpha=0.4)", "self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.frame = Frame(self.top) self.frame.grid(row=0, column=0, sticky=N+S+W+E) self.frame.grid_columnconfigure(0, weight=1) self.frame.grid_rowconfigure(0,", "self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) return (median_labels, ttest_res) def richness_all_samples(self, working_samples, samples_list, tax_level):", "2)) for med in medians] #t-test (Wlech's-test does not assume equal variance) from", "in bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] from scipy.stats import", "shannon_diversity_all_samples(self, working_samples, samples_list, tax_level): from skbio.diversity.alpha import shannon self.create_window() self.top.title('Shannon diversity') self.top.title('overview of", "a figure in high resolution \"\"\" filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=((\"PNG files\",\"*.png\"),", "'red3', 'springgreen3', 'steelblue2', 'darkorange2', 'springgreen4', 'skyblue4', 'firebrick4'] def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'):", "in bp['medians']] median_labels = [str(np.round(med, 2)) for med in medians] #t-test (Wlech's-test does", "title, initialfile, defaultextension='.png'): \"\"\" saves a figure in high resolution \"\"\" filename =", "initialfile=method: self.save_high_resolution_figure(fig, title, initialfile)) save_button.grid(row=1, column=0) def create_window(self): \"\"\" creates a popup window", "'deeppink4', 'darkolivegreen4', 'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4', 'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2',", "ax.set_ylabel('Shannon diversity index') fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3) matplotlib_frame = Frame(self.frame) matplotlib_frame.grid(row=2,", "= Frame(self.frame) matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2) canvas = FigureCanvasTkAgg(fig, matplotlib_frame) canvas.draw() canvas.get_tk_widget().grid(row=1, column=0,", "= len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level) if self.abundance_df.groupAbsoluteSamples() is not None: absolute_working_samples = self.abundance_df.groupAbsoluteSamples() absolute_working_samples", "ax.set_ylabel('Shannon diversity', fontsize=12) #add 
<reponame>klincke/MicroWineBar<gh_stars>1-10
import os, sys

from tkinter import *
from tkinter.ttk import *
from tkinter.filedialog import asksaveasfilename

import pandas as pd
import numpy as np
import tkinter.messagebox as tmb

from skbio.diversity.alpha import shannon

from .general_functions import *

import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
matplotlib.rcParams.update({'font.size': 10})
from scipy.spatial.distance import squareform


class PopUpIncludingMatplotlib():
    def __init__(self, root, abundance_df, all_tax_levels):
        self.root = root
        self.abundance_df = abundance_df
        self.all_tax_levels = all_tax_levels
        self.HEIGHT = 400
        self.COLOR_SCHEME = ['deepskyblue', 'forestgreen', 'navy', 'darkgoldenrod', 'steelblue4',
                             'blue2', 'seagreen', 'hotpink4', 'deeppink4', 'darkolivegreen4',
                             'turquoise4', 'gold3', 'dodger blue', 'turquoise3', 'mediumorchid4',
                             'royalblue1', 'red3', 'springgreen3', 'steelblue2', 'darkorange2',
                             'springgreen4', 'skyblue4', 'firebrick4']

    def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'):
        """ saves a figure in high resolution """
        filename = asksaveasfilename(title=title, initialfile=initialfile,
                                     defaultextension=defaultextension,
                                     filetypes=(("PNG files", "*.png"),
                                                ("EPS files", "*.eps")))
        # ... (remainder of the method abridged in this excerpt)

    def create_window(self):
        # (window/frame construction abridged)
        #self.top.title(self.name)
        #self.top.minsize(width=666, height=666)
        #self.top.maxsize(width=666, height=666)
        self.top.focus_set()

    def cancel(self, event=None):
        """ destroys/closes pop up window """
        # (body abridged)

    # NOTE: overview-of-richness method; its exact name is not recoverable from this excerpt.
    def richness(self, working_samples, samples_list, tax_level):
        self.create_window()
        self.top.title('Richness')
        self.top.title('overview of richness of all samples on ' + tax_level + ' level')
        self.inner_frame = Frame(self.frame)
        self.inner_frame.grid(row=1, column=0, columnspan=4)
        top_space = 20
        width = 600
        if len(samples_list) > 20:
            width = 1000
        start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level)
        if self.abundance_df.groupAbsoluteSamples() is not None:
            absolute_working_samples = self.abundance_df.groupAbsoluteSamples()
            absolute_working_samples = absolute_working_samples[samples_list].astype('int')
            richness = absolute_working_samples.astype(bool).sum(axis=0)
        else:
            richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2]
        fig = Figure(figsize=(4, 6), dpi=120)
        ax = fig.add_subplot(211)
        bp = ax.boxplot(richness)
        for val in richness:
            x = np.random.normal(1, 0.04, 1)
            ax.scatter(x, val, c='grey', marker='.', alpha=0.4)
        ax.set_xticklabels([''])
        ax.set_ylabel('number of ' + tax_level)
        ax = fig.add_subplot(212)
        for i, val in enumerate(richness):
            ax.scatter(richness.index[i], val, marker='.')
        ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical')
        ax.set_xlabel('samples')
        ax.set_ylabel('number of ' + tax_level)
        fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2)
        matplotlib_frame = Frame(self.frame)
        canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
        canvas.draw()
        canvas.get_tk_widget().grid(row=1, column=0, columnspan=4)
        # ... a "Save (high resolution)" Button wired to save_high_resolution_figure() follows

    # Further methods (bodies abridged in this excerpt): a Shannon-diversity overview
    # (skbio.diversity.alpha.shannon), richness_groups() and shannon_diversity_groups()
    # comparing two sample groups with median labels and a Welch's t-test
    # (scipy.stats.ttest_ind), a PCA/PCoA scatter plot of two sample groups, and a
    # CLR-transformed (skbio.stats.composition) abundance clustermap drawn with
    # sns.clustermap whose y-axis labels are saved to '<filename>_yaxis_labels.txt'.
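Every pop-up in the class above is built around the same pattern: a Matplotlib Figure rendered into a Tk container through the TkAgg backend. The following minimal, self-contained sketch (independent of MicroWineBar; the widget names are illustrative only) shows that pattern in isolation.

import tkinter as tk

import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure

root = tk.Tk()
root.title('embedded boxplot')

# build the figure with the object-oriented API, not pyplot
fig = Figure(figsize=(4, 3), dpi=100)
ax = fig.add_subplot(111)
values = np.random.normal(size=50)
ax.boxplot(values)
ax.set_ylabel('richness')

# FigureCanvasTkAgg ties the figure to the Tk widget tree; get_tk_widget()
# returns an ordinary widget that can be packed or gridded like any other
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)

root.mainloop()

Using the Figure/FigureCanvasTkAgg pair (rather than pyplot) keeps each pop-up's figure owned by its own window, which is why the class can open several plots side by side.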
[ "import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge, )", "\"\"\" from ._datasets import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k,", "krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge, ) from ._ebi_expression_atlas import ebi_expression_atlas", "blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge, ) from ._ebi_expression_atlas", "from ._datasets import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed,", "\"\"\"Builtin Datasets. \"\"\" from ._datasets import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch,", "._datasets import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge,", "( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge, ) from", "burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced, pbmc3k, pbmc3k_processed, visium_sge, ) from ._ebi_expression_atlas import", "Datasets. \"\"\" from ._datasets import ( blobs, burczynski06, krumsiek11, moignard15, paul15, toggleswitch, pbmc68k_reduced," ]
[ "color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"):", "assignment cast await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''')", "await self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'RED', }],", "property reference on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH", "Foo { color := 'BLUE' }; SELECT 'The test color is: ' ++", ".+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json", "{ color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): #", ") async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum", "async with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT e.RED' ) with", "assignment cast await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar", "'\\+\\+' cannot be applied to operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await", "self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name", "input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self):", "''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await", ") async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') # testing the", ".+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid", "this file except in compliance with the License. # You may obtain a", "def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT Foo", "['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+:", "e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async with", "value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with", "ANY KIND, either express or implied. 
# See the License for the specific", "' ++ <str>Foo.color; ''', ['The test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self):", "self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on", "self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a", "await self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT 'The test color", ") async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow enum", "# limitations under the License. # import os.path import edgedb from edb.testbase import", "test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, )", "test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color := 'BLUE' }; ''') await", "with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"): async with self._run_and_rollback(): await", "await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN;", "edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT", ") await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'}, )", "self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') await self.assert_query_result( r'''", "{ color := 'RED' }; ''') # testing the UPDATE assignment cast await", "SELECT 'The test color is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await", "UPDATE assignment cast await self.con.execute( r''' UPDATE Foo SET { color := 'GREEN'", "self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute(", "\"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string", "edgedb.QueryError, \"invalid property reference on a primitive type expression\"): async with self._run_and_rollback(): await", "cast await self.con.execute( r''' UPDATE Foo SET { color := 'GREEN' }; ''')", "limitations under the License. # import os.path import edgedb from edb.testbase import server", "test color is: ' ++ <str>Foo.color; ''', ['The test color is: BLUE'], )", "self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async", "self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT", "OF ANY KIND, either express or implied. 
# See the License for the", "def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r'''", "await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async", "test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED'])", "mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self):", "['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def", "to operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo {", "has no member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await", "''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result(", "async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT", "support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError,", "and # limitations under the License. # import os.path import edgedb from edb.testbase", "'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid", "r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def", "}], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await", "with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands of type \"", "self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def", "name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression", "{'BLUE'}, ) await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'},", "color := 'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The test color is:", "# # Copyright 2019-present MagicStack Inc. and the EdgeDB authors. # # Licensed", "r''' INSERT Foo { color := 'RED' }; ''') # testing the UPDATE", "async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT", "''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Bar SET", "expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid", "source file is part of the EdgeDB open source project. 
# # Copyright", "async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result(", "def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT Bar;", "self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got json number'): await self.con.execute(\"SELECT <color_enum_t><json>12\")", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "r''' UPDATE Foo SET { color := 'GREEN' }; ''') await self.assert_query_result( r'''", "input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with", "{'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"): async with self._run_and_rollback():", "edgedb.QueryError, \"enum types do not support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT", "'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum", "'WITH e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to", "''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to", "with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r'''", "member name must follow enum type name in the path\"): async with self._run_and_rollback():", "import os.path import edgedb from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA", "with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got json number'): await self.con.execute(\"SELECT", "color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async", "''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x; ''',", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color := 'BLUE' };", "name must follow enum type name in the path\"): async with self._run_and_rollback(): await", "self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "''') await self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'GREEN',", "self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''',", "''') await self.assert_query_result( r''' SELECT Bar { color }; ''', 
[{ 'color': 'GREEN',", "EdgeDB open source project. # # Copyright 2019-present MagicStack Inc. and the EdgeDB", "required by applicable law or agreed to in writing, software # distributed under", "r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x := default::color_enum_t.RED", "async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property", "self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError,", "applicable law or agreed to in writing, software # distributed under the License", "reference to link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' )", "'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive", "'WITH x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no", "async with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' ) with", "import edgedb from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__),", "x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member", "self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT", "enum member name must follow enum type name in the path\"): async with", "<color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError,", "edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async", "or agreed to in writing, software # distributed under the License is distributed", "def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'},", "async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types", "member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT", "await self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color': 'GREEN', }],", "cast await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') await", "self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"): async with self._run_and_rollback(): await self.con.execute(", "Foo { color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self):", "++ <str>Foo.color; ''', ['The test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "[{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\"", "SELECT 'The test color is: ' ++ <str>Foo.color; ''', ['The test color is:", "lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex(", "await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self):", "edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async", "'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'},", "the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the", "\"enum types do not support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED'", "with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do", "''') await self.assert_query_result( r''' SELECT 'The test color is: ' ++ <str>Foo.color; ''',", "\"unexpected reference to link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED'", "file is part of the EdgeDB open source project. # # Copyright 2019-present", "self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot", "await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on", "Bar { color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self):", "}; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color':", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "'The test color is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute(", "writing, software # distributed under the License is distributed on an \"AS IS\"", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "SELECT Foo { color }; ''', [{ 'color': 'RED', }], ) async def", "self.con.execute( r''' UPDATE Foo SET { color := 'GREEN' }; ''') await self.assert_query_result(", "await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input", "on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' )", "License. 
# You may obtain a copy of the License at # #", "member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT e.RED'", "['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid", "self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color': 'GREEN', }], )", "'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"): async", ":= 'RED' }; ''') await self.assert_query_result( r''' SELECT Foo { color }; ''',", "r''' SELECT Foo { color }; ''', [{ 'color': 'RED', }], ) async", "be applied to operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT", "test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT", "compliance with the License. # You may obtain a copy of the License", "test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT", "'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await", "SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo {", "enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError,", "''') await self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'RED',", "reference on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN'", ") await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH", "r''' INSERT Foo { color := 'RED' }; ''') await self.assert_query_result( r''' SELECT", "''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT", ") with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type expression\"): async", "must follow enum type name in the path\"): async with self._run_and_rollback(): await self.con.execute(", "INSERT Foo { color := 'RED' }; ''') # testing the UPDATE assignment", "of the EdgeDB open source project. # # Copyright 2019-present MagicStack Inc. 
and", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "INSERT assignment cast await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT", "self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'GREEN', }], )", "an enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError,", "\"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for", "self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color': 'RED', }], )", "}; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query(", "await self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'GREEN', }],", "test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT", "# testing the INSERT assignment cast await self.con.execute( r''' INSERT Foo { color", "color := 'RED' }; ''') await self.assert_query_result( r''' SELECT Foo { color };", "await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') await self.assert_query_result(", "test color is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r'''", "++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color", "tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result(", "async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN',", "def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r'''", "not use this file except in compliance with the License. 
# You may", "}], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') # testing", "color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\",", "self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null;", "self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') # testing the", "Foo { color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self):", "r''' INSERT Bar; ''') # testing the UPDATE assignment cast await self.con.execute( r'''", "''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Foo SET", "License, Version 2.0 (the \"License\"); # you may not use this file except", "do not support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with", "def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r'''", "}; SELECT 'The test color is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self):", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "governing permissions and # limitations under the License. # import os.path import edgedb", "def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color := 'BLUE' }; ''')", "self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow", "}], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color :=", "of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color :=", "), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async", "MagicStack Inc. and the EdgeDB authors. 
# # Licensed under the Apache License,", "self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands of type \" r\"'std::str'", "test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"): async", "Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color :=", "# you may not use this file except in compliance with the License.", "edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands of type \" r\"'std::str' and", "agreed to in writing, software # distributed under the License is distributed on", "color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo { color };", "# testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Foo SET {", ":= default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the", "expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e", "}; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing the", "(the \"License\"); # you may not use this file except in compliance with", "edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with", "SELECT Bar { color }; ''', [{ 'color': 'RED', }], ) async def", "r''' UPDATE Bar SET { color := 'GREEN' }; ''') await self.assert_query_result( r'''", "color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type expression\"):", "not support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex(", "import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def", "# Unless required by applicable law or agreed to in writing, software #", "with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference", "test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT Bar; ''')", "by applicable law or agreed to in writing, software # distributed under the", "specific language governing permissions and # limitations under the License. 
# import os.path", "await self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color': 'RED', }],", "testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Bar SET { color", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", ") with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did you mean", "cast await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar {", "async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got json", "primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT", "self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color': 'RED', }], )", "an enum member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t", ":= color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called", "def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got json number'):", "def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands of", "''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'):", "License. # import os.path import edgedb from edb.testbase import server as tb class", "{ color := 'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The test color", "file except in compliance with the License. # You may obtain a copy", "no member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute(", "value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with", "the License. # import os.path import edgedb from edb.testbase import server as tb", "follow enum type name in the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT", "under the License. # import os.path import edgedb from edb.testbase import server as", "with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex(", ") with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"): async with self._run_and_rollback():", "{ color := 'BLUE' }; SELECT 'The test color is: ' ++ Foo.color;", "License for the specific language governing permissions and # limitations under the License.", "color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type expression\"):", "Bar { color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self):", "<color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got", "self.con.execute( r''' UPDATE Bar SET { color := 'GREEN' }; ''') await self.assert_query_result(", "enum member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT", "# This source file is part of the EdgeDB open source project. 
#", "edgedb from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',", "to in writing, software # distributed under the License is distributed on an", "INSERT Foo { color := 'RED' }; ''') await self.assert_query_result( r''' SELECT Foo", "INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''', [{", "async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await", "await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE;", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color := 'BLUE'", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self):", "with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property", "{ color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual(", "with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async with self._run_and_rollback(): await", "<gh_stars>1000+ # # This source file is part of the EdgeDB open source", "<color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex(", "\"invalid property reference on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute(", "color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow enum type", "r''' INSERT Foo { color := 'BLUE' }; ''') await self.assert_query_result( r''' SELECT", "test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') # testing the UPDATE assignment cast", "types do not support backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' )", "or implied. 
# See the License for the specific language governing permissions and", "await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a", "Foo { color := 'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The test", "SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar {", "async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await", "type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN'", "[{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment", "async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'):", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "color is: ' ++ <str>Foo.color; ''', ['The test color is: BLUE'], ) async", ") with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow enum type name", "[{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo", "lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e :=", "TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color := 'RED' };", "in writing, software # distributed under the License is distributed on an \"AS", "enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected", "await self.con.execute( r''' UPDATE Bar SET { color := 'GREEN' }; ''') await", "testing the INSERT assignment cast await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result(", "the UPDATE assignment cast await self.con.execute( r''' UPDATE Bar SET { color :=", "def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\",", "edgedb.QueryError, \"an enum member name must follow enum type name in the path\"):", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): #", "'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''')", ":= color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property", "'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input 
value for", "''', ['The test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError,", "def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''')", "self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive", "\"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex(", "property reference on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT", ") async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'):", "''') await self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color': 'RED',", "color := 'BLUE' }; SELECT 'The test color is: ' ++ Foo.color; ''')", "' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo {", "to link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with", "SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async", "'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await", "''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing the INSERT", "permissions and # limitations under the License. # import os.path import edgedb from", "path expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t')", "a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED", "{ color := 'RED' }; ''') await self.assert_query_result( r''' SELECT Foo { color", "\"enum path expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute(", "self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''')", "EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "\"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\",", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "INSERT Foo { color := 'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The", "SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT", "r''' SELECT Bar { color }; ''', [{ 'color': 'GREEN', }], ) async", ") async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color := 'RED'", "Bar SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar", "test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path", "'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color", "is: ' ++ <str>Foo.color; ''', ['The test color is: BLUE'], ) async def", "INSERT Bar; ''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE", "_hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async", "''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo { color := 'BLUE'", "expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' )", "Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version", "'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError,", "\"an enum member name must follow enum type name in the path\"): async", "'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"'])", "use this file except in compliance with the License. 
# You may obtain", "test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') #", "self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has", "'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' )", "color }; ''', [{ 'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await", "self.con.execute( 'WITH e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference", "backlink\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path", "self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async", "enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError,", "'RED' }; ''') await self.assert_query_result( r''' SELECT Foo { color }; ''', [{", "{ color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await", "'The test color is: ' ++ <str>Foo.color; ''', ['The test color is: BLUE'],", "self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await", "'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''',", "<color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied", "in the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with", "test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or null; got json number'): await", "This source file is part of the EdgeDB open source project. # #", "2.0 (the \"License\"); # you may not use this file except in compliance", "self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''')", "link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex(", "r''' SELECT 'The test color is: ' ++ <str>Foo.color; ''', ['The test color", "for the specific language governing permissions and # limitations under the License. 
#", "await self.con.execute( r''' INSERT Foo { color := 'BLUE' }; ''') await self.assert_query_result(", "await self.assert_query_result( r''' SELECT 'The test color is: ' ++ <str>Foo.color; ''', ['The", "async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands", "SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED',", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT", "Bar; ''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Bar", "# # Unless required by applicable law or agreed to in writing, software", "with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"): async with", "await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member", ") with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async with self._run_and_rollback():", "Foo { color := 'RED' }; ''') await self.assert_query_result( r''' SELECT Foo {", ":= 'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The test color is: '", "express or implied. # See the License for the specific language governing permissions", "test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT Foo {", "with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member", "r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT", "self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT 'The test color is:", "await self.con.execute( r''' INSERT Foo { color := 'RED' }; ''') # testing", "testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Foo SET { color", "either express or implied. 
# See the License for the specific language governing", "self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not", "}; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r'''", "Foo SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo", "await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must", "}; ''') await self.assert_query_result( r''' SELECT 'The test color is: ' ++ <str>Foo.color;", "INSERT Foo { color := 'BLUE' }; SELECT 'The test color is: '", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "Bar; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''', [{ 'color':", "await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+'", "async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member", "with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\")", "{ color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await", "r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'},", "type name in the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED'", "the INSERT assignment cast await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r'''", "async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result(", "<color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum", "property 'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError,", "the License. 
# You may obtain a copy of the License at #", "with self._run_and_rollback(): await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex(", "<color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum", "is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "= os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN',", "'RED' }; ''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE", "cast await self.con.execute( r''' UPDATE Bar SET { color := 'GREEN' }; ''')", "self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED']) async def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"):", "await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'expected json string or", "enum type name in the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS", ":= 'RED' }; ''') # testing the UPDATE assignment cast await self.con.execute( r'''", "# import os.path import edgedb from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase):", "value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex(", "primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex(", "os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'};", "path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError,", "self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def", "default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the INSERT", "async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await self.con.execute( r''' INSERT", "edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async", "}; ''') # testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Foo", "open source project. # # Copyright 2019-present MagicStack Inc. 
and the EdgeDB authors.", "'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''', [{", "from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl')", "'BLUE' }; ''') await self.assert_query_result( r''' SELECT 'The test color is: ' ++", "SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be", "and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0", "x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did you", "color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''', {'GREEN'}, )", "Copyright 2019-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the", "INSERT assignment cast await self.con.execute( r''' INSERT Foo { color := 'RED' };", "with the License. # You may obtain a copy of the License at", ") async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await self.con.execute( r'''", "x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast", "async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.<RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"an enum", "expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with", "a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did", "edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def", "called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD'", "server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self):", "SELECT Bar { color }; ''', [{ 'color': 'GREEN', }], ) async def", "['The test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum", "'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') #", "r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''',", "with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type expression\"): async with", "the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex(", "color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await", "law or agreed to in writing, software # distributed under the License is", "source project. # # Copyright 2019-present MagicStack Inc. and the EdgeDB authors. 
#", "the License for the specific language governing permissions and # limitations under the", "cannot be applied to operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r'''", "default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x := default::color_enum_t.RED SELECT x;", "is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT Foo", "SELECT color_enum_t.GREEN; ''', {'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, )", "}], ) async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast await self.con.execute(", "await self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar { color", "SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment", "UPDATE Bar SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input", "'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type", ":= 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo { color }; ''',", "with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW';", "'color': 'GREEN', }], ) async def test_edgeql_enums_assignment_03(self): # testing the INSERT assignment cast", ":= 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar { color }; ''',", "\"enum path expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT", "{'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await self.con.execute(", "for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex(", "self.con.execute( r''' INSERT Bar; ''') await self.assert_query_result( r''' SELECT Bar { color };", ".+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator", "self.assert_query_result( r''' SELECT 'The test color is: ' ++ <str>Foo.color; ''', ['The test", "r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self):", "self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"):", "the INSERT assignment cast await self.con.execute( r''' INSERT Foo { color := 'RED'", "SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "SELECT Foo { color }; ''', [{ 'color': 'GREEN', }], ) async def", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks", "the UPDATE assignment cast await self.con.execute( r''' UPDATE Foo SET { color :=", "UPDATE Foo SET { color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT", "await self.con.execute( r''' UPDATE Foo SET { color := 'GREEN' }; ''') await", "self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x :=", "}; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r'''", "with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red';", "See the License for the specific language governing permissions and # limitations under", "color is: ' ++ Foo.color; ''') async def test_edgeql_enums_cast_05(self): await self.con.execute( r''' INSERT", "path expression lacks an enum member name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH", "r''' SELECT Bar { color }; ''', [{ 'color': 'RED', }], ) async", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo { color }; ''', [{", "\"enum has no member called 'RAD'\", _hint=\"did you mean 'RED'?\"): async with self._run_and_rollback():", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# testing the INSERT assignment cast await self.con.execute( r''' INSERT Bar; ''') await", "is part of the EdgeDB open source project. # # Copyright 2019-present MagicStack", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference on a primitive type expression\"): async with self._run_and_rollback():", "color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar { color };", "operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color", "assignment cast await self.con.execute( r''' UPDATE Bar SET { color := 'GREEN' };", "[{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar;", "part of the EdgeDB open source project. # # Copyright 2019-present MagicStack Inc.", "def test_edgeql_enums_json_cast_02(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value for enum .+color_enum_t.+: \"BANANA\"'): await", ":= 'BLUE' }; SELECT 'The test color is: ' ++ Foo.color; ''') async", "self.con.execute( r''' INSERT Foo { color := 'BLUE' }; ''') await self.assert_query_result( r'''", "e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link", "the specific language governing permissions and # limitations under the License. 
# import", "UPDATE assignment cast await self.con.execute( r''' UPDATE Bar SET { color := 'GREEN'", "'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT Foo {", "self.con.execute( 'SELECT color_enum_t.RAD' ) async def test_edgeql_enums_pathsyntax_02(self): await self.assert_query_result( r''' SELECT color_enum_t.GREEN; ''',", "authors. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "applied to operands of type \" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo", "await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input", "x := default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self): # testing", "for enum .+color_enum_t.+red'): await self.con.execute(r''' SELECT <color_enum_t>'red'; ''') async def test_edgeql_enums_cast_04(self): with self.assertRaisesRegex(", "{'GREEN'}, ) await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r'''", "color }; ''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute(", "def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"):", "SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self): with", "# testing the UPDATE assignment cast await self.con.execute( r''' UPDATE Bar SET {", "await self.assert_query_result( r''' SELECT default::color_enum_t.BLUE; ''', {'BLUE'}, ) await self.assert_query_result( r''' WITH x", "with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an", "async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "testing the INSERT assignment cast await self.con.execute( r''' INSERT Foo { color :=", "}; ''') await self.assert_query_result( r''' SELECT Foo { color }; ''', [{ 'color':", "for enum .+color_enum_t.+: \"BANANA\"'): await self.con.execute(\"SELECT <color_enum_t><json>'BANANA'\") async def test_edgeql_enums_json_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError,", "type expression\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RED.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError,", "self.assertRaisesRegex( edgedb.QueryError, \"unexpected reference to link property 'RED'\"): async with self._run_and_rollback(): await self.con.execute(", "name in the path\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' )", "language governing permissions and # limitations under the License. # import os.path import", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support", "await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result(", "project. # # Copyright 2019-present MagicStack Inc. and the EdgeDB authors. # #", "'RED'\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum", "self.con.execute( r''' INSERT Bar; ''') # testing the UPDATE assignment cast await self.con.execute(", "<json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result( \"SELECT <color_enum_t><json>'RED'\", ['RED']) await self.assert_query_result( \"SELECT <color_enum_t>'RED'\", ['RED'])", "the EdgeDB open source project. # # Copyright 2019-present MagicStack Inc. and the", "self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def test_edgeql_enums_cast_03(self): with self.assertRaisesRegex( edgedb.InvalidValueError, r'invalid input value", "<str>Foo.color; ''', ['The test color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex(", "{ color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Foo { color", "color := 'RED' }; ''') # testing the UPDATE assignment cast await self.con.execute(", "and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT 'The", "color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum member name\"): async", "''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_02(self): await self.con.execute( r''' INSERT", "r\"operator '\\+\\+' cannot be applied to operands of type \" r\"'std::str' and 'default::color_enum_t'\"):", "with self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow enum type name in", "# Copyright 2019-present MagicStack Inc. and the EdgeDB authors. 
# # Licensed under", "name\"): async with self._run_and_rollback(): await self.con.execute( 'WITH e := color_enum_t SELECT e.RED' )", "await self.con.execute( 'WITH x := color_enum_t.RED SELECT x.GREEN' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum", ") async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ), ['\"RED\"']) await self.assert_query_result(", "edgedb.QueryError, \"enum path expression lacks an enum member name\"): async with self._run_and_rollback(): await", "await self.con.execute( 'WITH e := color_enum_t SELECT e.RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"unexpected", "test_edgeql_enums_cast_04(self): with self.assertRaisesRegex( edgedb.QueryError, r\"operator '\\+\\+' cannot be applied to operands of type", "os.path import edgedb from edb.testbase import server as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA =", "r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED', 'GREEN', 'BLUE'}, ) async def test_edgeql_enums_cast_02(self):", "self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an enum", "'color': 'GREEN', }], ) async def test_edgeql_enums_json_cast_01(self): self.assertEqual( await self.con.query( \"SELECT <json><color_enum_t>'RED'\" ),", "async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') # testing the UPDATE", "'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r''' SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}; ''', {'RED',", "r'invalid input value for enum .+color_enum_t.+YELLOW'): await self.con.execute(r''' SELECT <color_enum_t>'YELLOW'; ''') async def", ") async def test_edgeql_enums_assignment_01(self): # testing the INSERT assignment cast await self.con.execute( r'''", "as tb class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await", "2019-present MagicStack Inc. and the EdgeDB authors. 
# # Licensed under the Apache", "'BLUE' }; SELECT 'The test color is: ' ++ Foo.color; ''') async def", "reference on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x", "Foo { color := 'RED' }; ''') # testing the UPDATE assignment cast", "{ color := 'GREEN' }; ''') await self.assert_query_result( r''' SELECT Bar { color", "on a primitive type expression\"): async with self._run_and_rollback(): await self.con.execute( 'WITH x :=", "self.assertRaisesRegex( edgedb.QueryError, \"an enum member name must follow enum type name in the", "r''' SELECT Foo { color }; ''', [{ 'color': 'GREEN', }], ) async", "with self.assertRaisesRegex( edgedb.QueryError, \"enum has no member called 'RAD'\", _hint=\"did you mean 'RED'?\"):", "enum member name\"): async with self._run_and_rollback(): await self.con.execute('SELECT color_enum_t') with self.assertRaisesRegex( edgedb.QueryError, \"enum", "assignment cast await self.con.execute( r''' UPDATE Foo SET { color := 'GREEN' };", "you mean 'RED'?\"): async with self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t.RAD' ) async def", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "color_enum_t@RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"enum types do not support backlink\"): async with", "await self.con.execute( r''' INSERT Bar; ''') # testing the UPDATE assignment cast await", "def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT Bar; ''') # testing the UPDATE assignment", "self._run_and_rollback(): await self.con.execute( 'SELECT color_enum_t[IS color_enum_t].RED' ) with self.assertRaisesRegex( edgedb.QueryError, \"invalid property reference", "'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color := 'BLUE' }; SELECT 'The test", "# # This source file is part of the EdgeDB open source project.", "''', [{ 'color': 'RED', }], ) async def test_edgeql_enums_assignment_04(self): await self.con.execute( r''' INSERT", "class TestEdgeQLEnums(tb.QueryTestCase): SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas', 'enums.esdl') async def test_edgeql_enums_cast_01(self): await self.assert_query_result( r'''", "\" r\"'std::str' and 'default::color_enum_t'\"): await self.con.execute(r''' INSERT Foo { color := 'BLUE' };", "BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression lacks an", "color is: BLUE'], ) async def test_edgeql_enums_pathsyntax_01(self): with self.assertRaisesRegex( edgedb.QueryError, \"enum path expression", "r''' WITH x := default::color_enum_t.RED SELECT x; ''', {'RED'}, ) async def test_edgeql_enums_assignment_01(self):" ]
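For context, here is a minimal sketch of how the same enum casts behave outside the test harness, using the edgedb-python blocking client. The connection target (a project-linked instance whose schema defines color_enum_t, as in the tests above) is an assumption and not part of the test module itself.

import edgedb

# Assumes a linked instance exposing color_enum_t; purely illustrative.
client = edgedb.create_client()

# Exact member names cast cleanly from str...
print(client.query("SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'}"))

# ...and round-trip through json without losing the member name.
print(client.query_single("SELECT <color_enum_t><json>'RED'"))

client.close()
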
[ "numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112)", "class set_memristor_parameters(QMainWindow): #Create and launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__()", "QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):')", "= QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates", "QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times',", "* from PyQt5.QtCore import * \"\"\"Class to take in various paramters of the", "353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100,", "= [] #Crestes the various widgets to take in Memristor Paramters for i", "self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text()))", "QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120,", "self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors +", "self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120,", "self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont", "62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox", "self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the", "Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color:", "False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth =", "self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90,", "* 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter", "QPalette() palette.setBrush(10, 
QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont =", "getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:]", "from PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class to take in various", "* self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters')", "self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton", "self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields", "25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box =", "self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on = [] self.R_off = [] self.W_0", "''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {}", "simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters,", "192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox", "def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength =", "= QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont =", "70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel", "self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette =", "Created on Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import", "[] self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields = []", "self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50))", "self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220,", "= True self.D = [] self.R_on = [] self.R_off = [] self.W_0 =", "self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) 
self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter", "(\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340,", "mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox =", "DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55", "readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on = [] self.R_off = []", "Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import * from", "(1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25)", "else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else:", "PyQt5.QtCore import * \"\"\"Class to take in various paramters of the Memristors to", "QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts", "[] self.W_0 = [] self.mobility = [] self.polarity = [] self.type = []", "from PyQt5.QtCore import * \"\"\"Class to take in various paramters of the Memristors", "14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel = QLabel(self)", "= QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 +", "parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return", "QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120,", "#Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show()", "= QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont", "500, 50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14,", "self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D", "self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140,", "W_0Box = QLineEdit(self) W_0Box.move(55 + 
(1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55", "140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50))", "50)) #Stores widgets to take in parameters self.DValueFields = [] self.R_onValueFields = []", "self.type = [] self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''):", "mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312)", "= QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox", "(\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22,", "90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel", "self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take in parameters self.DValueFields = []", "numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300,", "for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14,", "#Create the homescreen def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont", "if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text()", "= [] self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields =", "self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}')", "50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self)", "self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9)", "self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2", "else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else:", "13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the homescreen def home(self):", "(\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) 
self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36,", "self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont)", "Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets", "self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont)", "self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength,", "100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70,", "90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel", "(nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37,", "= QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov')", "self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take in parameters self.DValueFields =", "self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self)", "self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text()))", "* 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''):", "self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self)", "QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers title", "for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None)", "+ (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60,", "parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type']", "260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') 
self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50))", "[] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields = []", "= QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on", "= [] self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text())", "2019 @author: abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import * from", "paramters of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the", "of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main", "= [] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the", "QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50))", "50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel =", "parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on", "self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the various widgets", "18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import", "#Reads the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D =", "self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12))", "self.show() #Create the homescreen def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters')", "#Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}')", "range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''):", "self.memristorTypeValueFields = [] #Crestes the various widgets to take in Memristor Paramters for", "self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):')", "self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None)", "= self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return parameterDictionary def getOKButton(self): return", "self.W_0[:] 
parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return parameterDictionary def", "50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to", "= QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55", "self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels", "self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth))", "i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text()", "self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180,", "= QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take in", "self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90,", "else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] =", "self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont)", "self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13,", "self.mobility = [] self.polarity = [] self.type = [] self.pValues= [] for i", "range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont)", "[] self.memristorTypeValueFields = [] #Crestes the various widgets to take in Memristor Paramters", "self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont", "473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel',", "Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage))", "#!/usr/bin/env python3 # -*- 
coding: utf-8 -*- \"\"\" Created on Mon Mar 18", "312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120,", "(1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox)", "self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260,", "widgets to take in parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields =", "by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on = []", "-*- \"\"\" Created on Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from", "= QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets", "if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary", "= [] self.R_off = [] self.W_0 = [] self.mobility = [] self.polarity =", "!= ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None)", "various paramters of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch", "(1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 +", "self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers title self.DeviceLabel = QLabel(self)", "self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self)", "R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self)", "and launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False", "in parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields =", "[] self.R_on = [] self.R_off = [] self.W_0 = [] self.mobility = []", "self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self)", "self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:]", "def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = 
self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] =", "self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self)", "[] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else:", "= QFont('Times', 13) self.home() self.show() #Create the homescreen def home(self): #Window title self.titleLabel", "= QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel", "main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors", "QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont)", "numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 +", "= QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120,", "R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120,", "{color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100,", "(\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33,", "= QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility", "#Creates OK and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150,", "+ (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25)", "self.R_on = [] self.R_off = [] self.W_0 = [] self.mobility = [] self.polarity", "QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473)", "title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35,", "= 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage", 
"self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def", "QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times',", "= {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] =", "self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont)", "else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def", "= self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] =", "DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox", "#Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel", "(nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19,", "palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times',", "(1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox)", "+ (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25)", "darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40)", "if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else:", "- 120, 10, 500, 50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont", "self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25)", "the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors =", 
"QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62,", "= QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 +", "comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40)", "QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):')", "QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device", "self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100,", "120, 10, 500, 50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont =", "+ (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55", "= [] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields =", "[] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the various widgets to take", "self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage =", "button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}')", "utf-8 -*- \"\"\" Created on Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\"", "QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox =", "50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel =", "self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields", "[] #Crestes the various widgets to take in Memristor Paramters for i in", "comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox)", "QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 -", "labels self.DLabel = QLabel(self) 
self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel =", "R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55", "QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):')", "* from PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class to take in", "__init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength = 110", "= backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont =", "import * from PyQt5.QtCore import * \"\"\"Class to take in various paramters of", "Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create", "self.R_off = [] self.W_0 = [] self.mobility = [] self.polarity = [] self.type", "50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50))", "= QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity", "self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:]", "the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main window", "self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:]", "self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers title self.DeviceLabel =", "parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0']", "40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close)", "self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads", "self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont)", "home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold)", "self.polarity = [] self.type = [] 
self.pValues= [] for i in range(0, self.numberOfMemristors):", "18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import *", "#Crestes the various widgets to take in Memristor Paramters for i in range(0,", "DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152)", "!= ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() !=", "self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() !=", "self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300,", "numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50,", "self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280", "import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class to take", "10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions", "parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields = []", "13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton", "280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund", "90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel", "[] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields = []", "self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D']", "self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True", "self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on']", "= 
QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:')", "QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120,", "self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self)", "+ 280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets", "to take in Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self)", "-150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton =", "40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters)", "def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18,", "= self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] =", "parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility']", "+ (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192)", "self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return parameterDictionary def getOKButton(self): return self.setMemristorParametersOKButtonClicked", "self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75", "(1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox)", "if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else:", "Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette", "self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont)", "473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = 
QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters", "the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = []", "Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main window def", "50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel =", "100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets", "100, 50)) #Stores widgets to take in parameters self.DValueFields = [] self.R_onValueFields =", "Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont =", "self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input", "= QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont =", "''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText())", "#comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton", "self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() !=", "340, 100, 50)) #Stores widgets to take in parameters self.DValueFields = [] self.R_onValueFields", "i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold)", "True self.D = [] self.R_on = [] self.R_off = [] self.W_0 = []", "QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55", "be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main window def __init__(self, numberOfMemristors):", "#Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:]", "W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272)", "self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100,", "= [] self.R_on = [] self.R_off = [] self.W_0 = [] self.mobility =", "the various widgets to take in Memristor Paramters for i in range(0, self.numberOfMemristors):", "#Sets backgorund Image 
backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette()", "self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home()", "= False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth", "100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50))", "self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg')", "= QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0", "[] self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) *", "= self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return parameterDictionary", "(1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25)", "take in various paramters of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create", "QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel =", "from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class", "self.close() #Getter functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] =", "self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self)", "take in parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields", "self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60,", "QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):')", "(1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton = QPushButton('OK', self)", "\"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import *", "18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers", "self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) 
self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100,", "backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette)", "QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK", "300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage", "various widgets to take in Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel", "take in Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1))", "100, 50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel", "super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors", "parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity']", "R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232)", "-*- coding: utf-8 -*- \"\"\" Created on Mon Mar 18 18:54:20 2019 @author:", "W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox =", "in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}')", "#Stores widgets to take in parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields", "set_memristor_parameters(QMainWindow): #Create and launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked", "numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50))", "50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold)", "13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked =", "Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10,", "= [] self.R_onValueFields = [] self.R_offValueFields = [] self.W_0ValueFields 
= [] self.mobilityValueFields =", "= [] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the various widgets to", "self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take", "QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the homescreen def home(self): #Window", "self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100, 70, 50)) self.RoNLabel = QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont)", "self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] = self.mobility[:]", "= QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 +", "{} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:]", "= QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the homescreen", "180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50))", "backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10,", "QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self) self.WLabel.setText('W_0 (nm):')", "= QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 ,", "QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 +", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Mar 18 18:54:20 2019", "self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90,", "self.R_offValueFields = [] self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields", "13) self.home() self.show() #Create the homescreen def home(self): #Window title self.titleLabel = QLabel(self)", "PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class to take in various paramters", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon Mar 18 18:54:20", "{color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by", "!= ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None)", "110 * self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300, 
self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor", "[] self.R_off = [] self.W_0 = [] self.mobility = [] self.polarity = []", "60, 100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55, 100,", "in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() != ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() !=", "self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color:", "= self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off'] = self.R_off[:] parameterDictionary['W_0'] = self.W_0[:] parameterDictionary['mobility'] =", "[] self.polarity = [] self.type = [] self.pValues= [] for i in range(0,", "polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek')", "self.W_0ValueFields = [] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes", "self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off (\\u03A9):') self.RoFFLabel.setFont(self.labelFont) self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50)) self.WLabel = QLabel(self)", "+ (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25)", "!= ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() !=", "10, 500, 50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\",", "the homescreen def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont =", "self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}') self.OKButtonFont = QFont('Times', 13)", "50)) self.polLabel = QLabel(self) self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel =", "= [] self.W_0 = [] self.mobility = [] self.polarity = [] self.type =", "+ (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton = QPushButton('OK',", "= QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 +", "self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel = QLabel(self)", "else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''):", "backgroundScaledImage 
= backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont", "backgroundImage.scaled(QSize(self.windowLength, self.windowBreadth)) palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\",", "input by user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on =", "#comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel", "QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the homescreen def", "self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D (nm):') self.DLabel.setFont(self.labelFont) self.DLabel.setGeometry(QRect(55,", "QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take in parameters", "else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() *", "\"\"\" Created on Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui", "self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self)", "self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None)", ", 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the", "!= ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self): parameterDictionary =", "= [] self.type = [] self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text()", "#Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}')", "10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text()))", "''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''):", "self.home() self.show() #Create the homescreen def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor", 
"numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\", 14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 +", "window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength", "232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox", "!= ''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None)", "coding: utf-8 -*- \"\"\" Created on Mon Mar 18 18:54:20 2019 @author: abhigyan", "#comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button", "parameterDictionary['mobility'] = self.mobility[:] parameterDictionary['polarity'] = self.polarity[:] parameterDictionary['type'] = self.type[:] return parameterDictionary def getOKButton(self):", "to take in various paramters of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow):", "= QLabel(self) self.RoNLabel.setText('R_on (\\u03A9):') self.RoNLabel.setFont(self.labelFont) self.RoNLabel.setGeometry(QRect(37, 140, 90, 50)) self.RoFFLabel = QLabel(self) self.RoFFLabel.setText('R_off", "and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473) self.OKButton.setStyleSheet('QPushButton", "self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox =", "darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user", "self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage = QImage('memristor1.jpg') backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength,", "QFont('Times', 13) self.home() self.show() #Create the homescreen def home(self): #Window title self.titleLabel =", "[] self.mobilityValueFields = [] self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the various", "def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on = [] self.R_off =", "numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors = numberOfMemristors self.windowLength = 110 *", "50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50)) self.polLabel =", "self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores widgets to take in parameters self.DValueFields", "self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) 
else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text()))", "= [] self.mobility = [] self.polarity = [] self.type = [] self.pValues= []", "PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * \"\"\"Class to", "in various paramters of the Memristors to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and", "14, QFont.Bold) numberLabel.setStyleSheet('QLabel{color:blue}') numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self)", "self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico'))", "polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353)", "550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image backgroundImage =", "self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close()", "self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont)", "QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis')", "= QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def readParameters(self):", "functions def getMemristorParamters(self): parameterDictionary = {} parameterDictionary['D'] = self.D[:] parameterDictionary['R_on'] = self.R_on[:] parameterDictionary['R_off']", "palette = QPalette() palette.setBrush(10, QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold)", "\"\"\"Class to take in various paramters of the Memristors to be simulated\"\"\" class", "112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox = QLineEdit(self) R_oNBox.move(55 + (1+i)*120, 152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox)", "self.buttonFont = QFont('Times', 13) self.home() self.show() #Create the homescreen def home(self): #Window title", "to be simulated\"\"\" class set_memristor_parameters(QMainWindow): #Create and launch the main window def __init__(self,", "272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox", "numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) 
self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont)", "= numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300,", "widgets to take in Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel =", "50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120, 112) DVFBox.resize(60,25) self.DValueFields.append(DVFBox) R_oNBox =", "self.polLabel.setText('Polarity (\\u03B7):') self.polLabel.setFont(self.labelFont) self.polLabel.setGeometry(QRect(22, 300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73,", "self.OKButtonFont = QFont('Times', 13) self.OKButton.setFont(self.OKButtonFont) self.OKButton.clicked.connect(self.readParameters) self.cancelButton = QPushButton('Cancel', self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2", "polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox = QComboBox(self) comboBox.addItem('Ideal')", "#Create and launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked =", "self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text()", "self.mobility.append(None) if(self.polarityValueFields[i].text() != ''): self.polarity.append(float(self.polarityValueFields[i].text())) else: self.polarity.append(None) self.type.append(self.memristorTypeValueFields[i].currentText()) self.close() #Getter functions def getMemristorParamters(self):", "* \"\"\"Class to take in various paramters of the Memristors to be simulated\"\"\"", "QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50)) #Parameter labels self.DLabel = QLabel(self) self.DLabel.setText('D", "''): self.D.append(float(self.DValueFields[i].text()) * 10**-9) else: self.D.append(None) if(self.R_onValueFields[i].text() != ''): self.R_on.append(float(self.R_onValueFields[i].text())) else: self.R_on.append(None) if(self.R_offValueFields[i].text()", "self.numberOfMemristors = numberOfMemristors self.windowLength = 110 * self.numberOfMemristors + 280 self.windowBreadth = 550", "= 110 * self.numberOfMemristors + 280 self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth)", "= [] self.memristorTypeValueFields = [] #Crestes the various widgets to take in Memristor", "[] self.type = [] self.pValues= [] for i in range(0, self.numberOfMemristors): if(self.DValueFields[i].text() !=", "''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''):", "self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500,", 
"comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and Cancel button self.OKButton =", "300, 100, 50)) self.typeLabel = QLabel(self) self.typeLabel.setText('Type:') self.typeLabel.setFont(self.labelFont) self.typeLabel.setGeometry(QRect(73, 340, 100, 50)) #Stores", "comboBox.addItem('Ideal') #comboBox3.addItem('Strukov') #comboBox.addItem('Prodromakis') #comboBox.addItem('Biolek') comboBox.move(55 + (1+i)*120, 353) comboBox.resize(80,25) self.memristorTypeValueFields.append(comboBox) #Creates OK and", "QFont('Times', 13) self.cancelButton.setFont(self.cancelButtonFont) self.cancelButton.clicked.connect(self.close) #Reads the parameters input by user def readParameters(self): self.setMemristorParametersOKButtonClicked", "user def readParameters(self): self.setMemristorParametersOKButtonClicked = True self.D = [] self.R_on = [] self.R_off", "= QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50))", "= QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2", "launch the main window def __init__(self, numberOfMemristors): super(set_memristor_parameters, self).__init__() self.setMemristorParametersOKButtonClicked = False self.numberOfMemristors", "self.D = [] self.R_on = [] self.R_off = [] self.W_0 = [] self.mobility", "on Mon Mar 18 18:54:20 2019 @author: abhigyan \"\"\" from PyQt5.QtGui import *", "R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox =", "self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers title self.DeviceLabel = QLabel(self) self.DeviceLabel.setText('Device:')", "QBrush(backgroundScaledImage)) self.setPalette(palette) #Sets Fonts self.labelFont = QFont(\"Arial\", 13, QFont.Bold) self.buttonFont = QFont('Times', 13)", "self.WLabel.setText('W_0 (nm):') self.WLabel.setFont(self.labelFont) self.WLabel.setGeometry(QRect(33, 220, 90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont)", "152) R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox)", "self.polarityValueFields = [] self.memristorTypeValueFields = [] #Crestes the various widgets to take in", "''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else: self.mobility.append(None) if(self.polarityValueFields[i].text()", "import * \"\"\"Class to take in various paramters of the Memristors to be", "in Memristor Paramters for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont", "R_oNBox.resize(60, 25) self.R_onValueFields.append(R_oNBox) R_offBox = QLineEdit(self) R_offBox.move(55 + (1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box", "220, 
90, 50)) self.mobLabel = QLabel(self) self.mobLabel.setText('Mobility (\\u03BC):') self.mobLabel.setFont(self.labelFont) self.mobLabel.setGeometry(QRect(19, 260, 100, 50))", "(1+i)*120, 192) R_offBox.resize(60,25) self.R_offValueFields.append(R_offBox) W_0Box = QLineEdit(self) W_0Box.move(55 + (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box)", "homescreen def home(self): #Window title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\",", "+ (1+i)*120, 232) W_0Box.resize(60,25) self.W_0ValueFields.append(W_0Box) mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25)", "= [] self.polarity = [] self.type = [] self.pValues= [] for i in", "QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120,", "abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import", "self.windowBreadth = 550 self.setGeometry(300, 300, self.windowLength, self.windowBreadth) self.setWindowTitle('Memristor Parameters') self.setWindowIcon(QIcon('memristor_icon.ico')) #Sets backgorund Image", "self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True) self.titleLabel.setFont(self.titleFont) self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50)) #Device numbers title self.DeviceLabel", "OK and Cancel button self.OKButton = QPushButton('OK', self) self.OKButton.resize(100, 40) self.OKButton.move(self.windowLength/2 -150, 473)", "= QLabel(self) self.DeviceLabel.setText('Device:') self.DeviceLabelFont = QFont(\"Calibri\", 14, QFont.Bold) self.DeviceLabel.setStyleSheet('QLabel{color:blue}') self.DeviceLabel.setFont(self.DeviceLabelFont) self.DeviceLabel.setGeometry(QRect(35, 60, 100,", "numberLabel.setFont(self.DeviceLabelFont) numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50)) DVFBox = QLineEdit(self) DVFBox.move(55 + (1+i)*120,", "self) self.cancelButton.resize(100, 40) self.cancelButton.move(self.windowLength/2 , 473) self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}') self.cancelButtonFont = QFont('Times', 13)", "if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text() != ''): self.mobility.append(float(self.mobilityValueFields[i].text() * 10**-12)) else:", "@author: abhigyan \"\"\" from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore", "mobilityBox = QLineEdit(self) mobilityBox.move(55 + (1+i)*120, 272) mobilityBox.resize(60,25) self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55", "[] self.mobility = [] self.polarity = [] self.type = [] self.pValues= [] for", "title self.titleLabel = QLabel(self) self.titleLabel.setText('Memristor Parameters') self.titleFont = QFont(\"Times\", 18, QFont.Bold) self.titleLabel.setStyleSheet('QLabel{color:purple}') self.titleFont.setUnderline(True)", "Paramters for i in range(0, self.numberOfMemristors): numberLabel = QLabel(self) numberLabel.setText(str(i+1)) numberLabelFont = QFont(\"Calibri\",", "self.W_0 = [] self.mobility = [] self.polarity = [] self.type = [] self.pValues=", "self.mobilityValueFields.append(mobilityBox) polarityBox = QLineEdit(self) polarityBox.move(55 + (1+i)*120, 312) polarityBox.resize(60,25) self.polarityValueFields.append(polarityBox) comboBox 
= QComboBox(self)", "to take in parameters self.DValueFields = [] self.R_onValueFields = [] self.R_offValueFields = []", "if(self.R_offValueFields[i].text() != ''): self.R_off.append(float(self.R_offValueFields[i].text())) else: self.R_off.append(None) if(self.W_0ValueFields[i].text() != ''): self.W_0.append(float(self.W_0ValueFields[i].text())) else: self.W_0.append(None) if(self.mobilityValueFields[i].text()" ]
[ "import admin from .models import ServerInfo,SampleData,DeviceControl,UserApp # Register your models here. admin.site.register(ServerInfo) admin.site.register(SampleData)", "from django.contrib import admin from .models import ServerInfo,SampleData,DeviceControl,UserApp # Register your models here.", "admin from .models import ServerInfo,SampleData,DeviceControl,UserApp # Register your models here. admin.site.register(ServerInfo) admin.site.register(SampleData) admin.site.register(DeviceControl)", "django.contrib import admin from .models import ServerInfo,SampleData,DeviceControl,UserApp # Register your models here. admin.site.register(ServerInfo)", "from .models import ServerInfo,SampleData,DeviceControl,UserApp # Register your models here. admin.site.register(ServerInfo) admin.site.register(SampleData) admin.site.register(DeviceControl) admin.site.register(UserApp)" ]
[ "ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5] b = [5,", "== [2, 2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) net = Net()", "[2, 2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] =", "Unless required by applicable law or agreed to in writing, software # distributed", "44, 55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a", "construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net = GradNet(net) x", "stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return tuple(a) net = NetInner() a", "List assign Description: Test list slice extend Expectation: No exception. \"\"\" a =", "== python_out def test_list_slice_insert(): \"\"\" Feature: List assign Description: Test list slice insert", "assign Expectation: No exception. \"\"\" a = [1, 2, 3, 4, 5, 6,", "b, 5, 5) assert graph_out == python_out def test_list_slice_erase(): \"\"\" Feature: List assign", "= Net2() pynative_out = net(a, b, 1, None, 3) assert pynative_out == python_out", "[1, 2, 3, 4, 5, 6, 7, 8, 9] b = (11, 22,", "Description: Test list assign the size is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE)", "4] b = [5, 6, 7, 8] python_out = compare_func1(a, b, 0, None,", "Test list slice assign with tuple Expectation: No exception. \"\"\" a = [1,", "7, 8] net = Net1() with pytest.raises(ValueError) as err: net(a, b, 0, None,", "graph_out == python_out def test_list_slice_negative_step(): \"\"\" Feature: List assign Description: Test negative step", "def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]] list_[0][0][0]", "== [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [100] assert", "2], [3, 3, 3]] list_[0] = [100] return list_ net = Net() out", "stop is larger than size Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "4, 5, 6, 7], 8, 9] b = [1111, 2222] python_out = com_func3(a,", "= compare_func1(a, b, 0, None, 2) a = [1, 2, 3, 4] b", "slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2)", "context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7] net = Net2()", "python_out def test_list_slice_negative_step(): \"\"\" Feature: List assign Description: Test negative step list slice", "str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert \"attempt", "dimension list slice assign Expectation: No exception. 
\"\"\" class TestNet(Cell): def construct(self, a,", "8, 9] b = [33, 44, 55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError)", "start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return a class NetInner(Cell): def", "stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a, b, start=None, stop=None, step=None):", "assert foo(a, b, 0, None, 1, 0, None, 3) == net(a, b, 0,", "pynative_out = net(a, b, 5, 5) assert pynative_out == python_out a = [1,", "== python_out def test_list_slice_extend(): \"\"\" Feature: List assign Description: Test list slice extend", "list_ = [[1], [2, 2], [3, 3, 3]] list_[1][0] = 200 list_[1][1] =", "List assign Description: Test list slice start and stop is larger than size", "2] assert list(out[2][0]) == [30, 31, 32] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0])", "net = Net() grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) value", "only assign an iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List assign Description:", "55] net = Net2() pynative_out = net(a, b, 5, 5) assert pynative_out ==", "0, None, 2) a = [1, 2, 3, 4] b = [5, 6,", "Test list slice erase Expectation: No exception. \"\"\" a = [1, 2, 3,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= Net() grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) sens =", "list_ = [[1], [2, 2], [3, 3, 3]] list_[0] = [100] return list_", "context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2, 3, 4, 5, 6, 7,", "= b return tuple(a) def com_func3(a, b, index, start=None, stop=None, step=None): a[index][start:stop:step] =", "[2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, input_x): list_x", "22, 33, 44, 55] pynative_out = net(a, b, 1234, 0) assert pynative_out ==", "Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3), np.int64)) sens = Tensor(np.arange(2 *", "\"must assign iterable to extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err:", "def test_list_slice_erase(): \"\"\" Feature: List assign Description: Test list slice erase Expectation: No", "== [3, 3, 3] def test_list_neg_index_1d(): \"\"\" Feature: List index assign Description: Test", "1) assert \"can only assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as", "0, None, 2) assert \"must assign iterable to extended slice\" in str(err.value) def", "None) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "5, 6, 7, 8, 9] b = [11, 22, 33] net = Net2()", "3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [100] assert list(out[1]) == [2,", "2], [[3, 3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2] = 31 list_[2][0][-1] = 32", "python_out def test_list_slice_length_error(): \"\"\" Feature: List assign Description: Test list assign the size", "-3, -3) assert \"attempt to assign sequence of size 3 to extended slice", "net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]]", "a class NetInner(Cell): def construct(self, a, b, start1, stop1, step1, start2, stop2, step2):", "assign Description: Test list slice extend Expectation: No exception. 
\"\"\" a = [1,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "self.relu = P.ReLU() def construct(self, x, value): list_value = [[x], [x, x], [[x,", "net(a, b, 0, 5) assert pynative_out == python_out a = [1, 2, 3,", "= Net() out = net() assert list(out[0]) == [1] assert list(out[1]) == [200,", "list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2] =", "Feature: List assign Description: Test list slice extend Expectation: No exception. \"\"\" a", "= net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens)", "3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3)", "net(a, b, 0, None, 1, 0, None, 3) def convert_tuple(a): result = tuple()", "step2] = b return a class NetInner(Cell): def construct(self, a, b, start1, stop1,", "graph_out = net(a, [], 1, 3) assert graph_out == python_out def test_list_slice_tuple_without_step(): \"\"\"", "= net(a, b, 1, None, 3) assert graph_out == python_out def test_list_double_slice(): \"\"\"", "in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3, -3) assert", "assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3) assert", "3], [3, 3]]] self.relu = P.ReLU() def construct(self, input_x): list_x = self.value list_x[2][0][1]", "return list_ net = Net() net(Tensor(0)) def test_list_index_2D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self,", "8, 9] b = [11, 22, 33, 44, 55] net = Net2() python_out", "[3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert list(out[1])", "[[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2]", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "[[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] = 302 return", "slice assign Expectation: ValueError \"\"\" a = [1, 2, 3, 4, 5, 6,", "b, 0, 0) assert graph_out == python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign", "slice assign Expectation: No exception. \"\"\" class TestNet(Cell): def construct(self, a, b, index,", "return list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self,", "with tuple Expectation: No exception. 
\"\"\" a = [1, 2, 3, 4, 5,", "net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__()", "= [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5, 5)", "3, 3] def test_list_index_3d(): \"\"\" Feature: List index assign Description: Test list assign", "4, 5, 6, 7, 8, 9] b = (11, 22, 33) net =", "b return tuple(a) def com_func3(a, b, index, start=None, stop=None, step=None): a[index][start:stop:step] = b", "net() assert list(out[0]) == [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) ==", "3) context.set_context(mode=context.PYNATIVE_MODE) net = TestNet() a = [1, 2, [1, 2, 3, 4,", "python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, -12, 456) assert graph_out == python_out def", "[3, 3, 3] def test_list_neg_index_2d(): \"\"\" Feature: List index assign Description: Test list", "net(a, [], 1, 3) assert graph_out == python_out def test_list_slice_tuple_without_step(): \"\"\" Feature: List", "value return self.relu(list_value[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net", "exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3,", "= net(a, b, 0, 1) assert pynative_out == python_out a = [1, 2,", "5, 6, 7] net = Net2() pynative_out = net(a, [], 1, 3) assert", "assert list(out[2][0]) == [300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) ==", "0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2, 3, 4, 5, 6,", "3)) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = convert_tuple(net(a, b, 2, 1, None,", "sequence of size 2 to extended slice of size 3\" in str(err.value) def", "5) assert pynative_out == python_out a = [1, 2, 3, 4, 5, 6,", "assert list(out[1]) == [2, 2] assert list(out[2]) == [3, 3, 3] def test_list_index_2d():", "6, 7, 8, 9] b = [11, 22, 33] assert foo(a, b, 0,", "= Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3, -3) assert", "302] def test_list_neg_index_3d(): \"\"\" Feature: List index assign Description: Test list assign in", "55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0) assert graph_out == python_out def", "= net(a, b, 5, 5) assert pynative_out == python_out a = [1, 2,", "net(a, b, 0, None, 2) assert \"attempt to assign sequence of size 2", "= Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, sens)", "== [3, 3, 3] def test_list_index_3d(): \"\"\" Feature: List index assign Description: Test", "return result def test_list_in_list_slice(): \"\"\" Feature: List assign Description: Test high dimension list", "2, 3, 4, 5, 6, 7, 8, 9] b = [11, 22, 33,", "[3, 3, 3] def test_list_neg_index_1d(): \"\"\" Feature: List index assign Description: Test list", "5, 6, 7, 8, 9] b = [33, 44, 55] pynative_out = net(a,", "index assign Description: Test list assign in pynative mode Expectation: No exception. 
\"\"\"", "b = (11, 22, 33) python_out = compare_func2(a, b, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE)", "b = [11, 22, 33, 44, 55] net = Net2() python_out = compare_func2(a,", "[200, 201] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_2d(): \"\"\" Feature: List", "b, 0, 5) a = [1, 2, 3, 4, 5, 6, 7, 8,", "return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test list assign the", "[2, 2], [3, 3, 3]] list_[1][0] = 200 list_[1][1] = 201 return list_", "ValueError \"\"\" a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 4, None) assert graph_out == python_out def", "9] b = [11, 22, 33, 44, 55] pynative_out = net(a, b, 1234,", "assign an iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List assign Description: Test", "No exception. \"\"\" a = [1, 2, 3, 4, 5, 6, 7, 8,", "None) assert pynative_out == python_out a = [1, 2, 3, 4, 5, 6,", "continue result += (i,) return result def test_list_in_list_slice(): \"\"\" Feature: List assign Description:", "TestNet() a = [1, 2, [1, 2, 3, 4, 5, 6, 7], 8,", "3]]] self.relu = P.ReLU() def construct(self, x, value): list_value = [[x], [x, x],", "b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234,", "tuple Expectation: No exception. \"\"\" a = [1, 2, 3, 4, 5, 6,", "[11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 0) assert", "size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3,", "and stop is larger than size Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a =", "[2, 2] assert list(out[2]) == [3, 3, 3] def test_list_index_2d(): \"\"\" Feature: List", "Test list slice shrink assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "pynative_out = net(a, b, 0, 4, None) assert pynative_out == python_out a =", "b = (11, 22, 33) net = Net2() pynative_out = net(a, b, 0,", "def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [x] list_[0] =", "8, 9] b = [11, 22, 33, 44, 55] python_out = compare_func2(a, b,", "net(a, b, 1, None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "construct(self, x, value, sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net =", "not use this file except in compliance with the License. # You may", "== python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5, 6, 7, 8,", "nn from mindspore.nn import Cell from mindspore.ops import composite as C from mindspore.ops", "4, None) assert pynative_out == python_out a = [1, 2, 3, 4, 5,", "list(out[2]) == [3, 3, 3] def test_list_index_2d(): \"\"\" Feature: List index assign Description:", "context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert \"attempt to", "stop=None, step=None): a[index][start:stop:step] = b return convert_tuple(a) a = [1, 2, [1, 2,", "under the License. 
# ============================================================================ \"\"\" test enumerate\"\"\" import numpy as np import", "size 3\" in str(err.value) def compare_func2(a, b, start=None, stop=None, step=None): a[start:stop:step] = b", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "value): list_value = [[x], [x, x], [[x, x], [x, x]]] list_value[2][0][1] = value", "3, 4, 5, 6, 7, 8, 9] b = (11, 22, 33) python_out", "as P from mindspore import Tensor, ms_function from mindspore import context def test_list_index_1d():", "[2, 2], [3, 3, 3]] list_[-3] = [100] return list_ net = Net()", "net(a, b, -1, -9, -3) assert graph_out == python_out def test_graph_list_slice_assign_extended_number(): \"\"\" Feature:", "err: net(a, b, 0, None, 2) assert \"attempt to assign sequence of size", "return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net = GradNet(net) x = Tensor(np.arange(2 *", "agreed to in writing, software # distributed under the License is distributed on", "P.ReLU() def construct(self, x, value): list_value = [[x], [x, x], [[x, x], [x,", "self).__init__() self.value = [[1], [2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU()", "Test negative step list slice assign Expectation: ValueError \"\"\" a = [1, 2,", "9] b = [11, 22, 33, 44, 55] python_out = compare_func2(a, b, -12,", "6, 7, 8, 9] b = [33, 44, 55] python_out = compare_func2(a, b,", "[11, 22, 33, 44, 55] pynative_out = net(a, b, 1234, 0) assert pynative_out", "6, 7, 8, 9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "list_ = [[x, x]] list_[0][0] = 100 return list_ net = Net() net(Tensor(0))", "== [300, 301, 302] def test_list_neg_index_3d(): \"\"\" Feature: List index assign Description: Test", "55] pynative_out = net(a, b, 0, 0) assert pynative_out == python_out a =", "b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description: Test list slice", "b, 0, 4, None) assert pynative_out == python_out a = [1, 2, 3,", "== python_out def test_list_slice_extend_front(): \"\"\" Feature: List assign Description: Test list slice extend", "= [1, 2, 3, 4, 5, 6, 7, 8, 9] b = (11,", "2, 3, 4, 5, 6, 7, 8, 9] b = (11, 22, 33)", "slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List assign Description: Test negative step", "list(out[1]) == [2, 2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) net =", "[11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 1) assert", "extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None,", "22, 33, 44, 55] net = Net2() python_out = compare_func2(a, b, 1234, 0)", "test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test list assign the size is equal", "list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302] def test_list_neg_index_3d(): \"\"\"", "context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]] list_[0][0][0] = 100", "def test_list_slice_extend(): \"\"\" Feature: List assign Description: Test list slice extend Expectation: No", "assert graph_out == python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list", "com_func3(a, b, 2, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) net = TestNet() a = [1,", "None, 2) assert \"attempt to assign sequence of size 2 to extended slice", "[2, 
2] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_1d(): \"\"\" Feature: List", "Description: Test list assign in pynative mode Expectation: No exception. \"\"\" class Net(nn.Cell):", "20 list_[1][-1] = 21 return list_ net = Net() out = net() assert", "list_[2][0][-1] = 32 return list_ net = Net() out = net() assert list(out[0])", "pynative_out = net(a, b, -1, -9, -3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a", "python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value =", "in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert", "44, 55] net = Net2() pynative_out = net(a, b, 5, 5) assert pynative_out", "55] python_out = compare_func2(a, b, -12, 456) a = [1, 2, 3, 4,", "Description: Test list slice start and stop is larger than size Expectation: No", "[1, 2, 3, 4] b = [5, 6, 7, 8] python_out = compare_func1(a,", "a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description: Test", "to in writing, software # distributed under the License is distributed on an", "[1, 2, 3, 4, 5, 6, 7], 8, 9] b = [1111, 2222]", "assert \"must assign iterable to extended slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature:", "b, 0, 1) assert graph_out == python_out def test_list_slice_assign(): \"\"\" Feature: List assign", "size 3 to extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError)", "implied. 
# See the License for the specific language governing permissions and #", "= [[1], [2, 2], [3, 3, 3]] list_[0] = [100] return list_ net", "def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][0] = 200", "2, 3, 4, 5] b = [5, 6, 7, 8] net = Net1()", "0) assert graph_out == python_out def test_list_slice_extend_front(): \"\"\" Feature: List assign Description: Test", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3] =", "5, 5) assert graph_out == python_out def test_list_slice_erase(): \"\"\" Feature: List assign Description:", "graph_out == python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list slice", "= net(a, b, 1234, 0) assert graph_out == python_out def test_list_slice_extend_front(): \"\"\" Feature:", "9] b = [1111, 2222] python_out = com_func3(a, b, 2, 1, None, 3)", "8, 9] b = (11, 22, 33) net = Net2() pynative_out = net(a,", "44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 0) assert graph_out == python_out", "6, 7, 8] net = Net1() with pytest.raises(ValueError) as err: net(a, b, 0,", "step=None): a[index][start:stop:step] = b return convert_tuple(a) a = [1, 2, [1, 2, 3,", "sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net = GradNet(net) x =", "assert list(out[0]) == [1] assert list(out[1]) == [20, 21] assert list(out[2]) == [3,", "net(a, b, 1234, 0) assert pynative_out == python_out a = [1, 2, 3,", "sens_param=True) def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net =", "Feature: List assign Description: Test list slice assign with tuple Expectation: No exception.", "\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5,", "5) a = [1, 2, 3, 4, 5, 6, 7, 8, 9] b", "pynative mode Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ =", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "List assign Description: Test list slice extend Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a", "9] b = [11, 22, 33] assert foo(a, b, 0, None, 1, 0,", "= b[start:stop:step] return tuple(a) def compare_func1(a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step]", "== [100] assert list(out[1]) == [2, 2] assert list(out[2]) == [3, 3, 3]", "python_out def test_graph_list_slice_assign_extended_number(): \"\"\" Feature: List assign Description: Test negative step list slice", "\"\"\" Feature: List assign Description: Test negative step list slice assign Expectation: ValueError", "No exception. \"\"\" class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3,", "8] net = Net1() with pytest.raises(ValueError) as err: net(a, b, 0, None, 2)", "context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert \"must assign", "9] b = [11, 22, 33] python_out = compare_func2(a, b, 0, 5) a", "[11, 22, 33, 44, 55] net = Net2() pynative_out = net(a, b, 5,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "Expectation: No exception. 
\"\"\" a = [1, 2, 3, 4, 5, 6] b", "context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3, -3) assert \"attempt to", "9] b = [33, 44, 55] graph_out = net(a, b, -1, -9, -3)", "mode Expectation: No exception. \"\"\" class Net(nn.Cell): def construct(self): list_ = [[1], [2,", "= C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net()", "= Net2() pynative_out = net(a, b, 0, 5) assert pynative_out == python_out a", "err: net(a, b, 0, None, 1) assert \"can only assign an iterable\" in", "a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b", "list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [100]", "b, 5, 5) assert pynative_out == python_out a = [1, 2, 3, 4,", "8, 9] b = [11, 22, 33] assert foo(a, b, 0, None, 1,", "= 21 return list_ net = Net() out = net() assert list(out[0]) ==", "class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3]", "list(out[0]) == [1] assert list(out[1]) == [20, 21] assert list(out[2]) == [3, 3,", "pynative_mode_out = net(a, b, 0, None, 2) assert pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "[[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, input_x): list_x = self.value", "= [5, 6, 7, 8] net = Net1() pynative_mode_out = net(a, b, 0,", "# Copyright 2020-2022 Huawei Technologies Co., Ltd # # Licensed under the Apache", "you may not use this file except in compliance with the License. #", "def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test list slice assign with tuple", "net(a, b, 0, 0) assert pynative_out == python_out a = [1, 2, 3,", "graph_out = net(a, b, 1, None, 3) assert graph_out == python_out def test_list_double_slice():", "Net() out = net() assert list(out[0]) == [1] assert list(out[1]) == [2, 2]", "mode Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1],", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][0] =", "3]] list_[-3] = [100] return list_ net = Net() out = net() assert", "= (11, 22, 33) python_out = compare_func2(a, b, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) a", "compare_func2(a, b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2,", "not equal Expectation: ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5]", "6, 7] graph_out = net(a, [], 1, 3) assert graph_out == python_out def", "list_[0] = [100] return list_ net = Net() out = net() assert list(out[0])", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "index, start=None, stop=None, step=None): a[index][start:stop:step] = b return convert_tuple(a) a = [1, 2,", "exception. \"\"\" class TestNet(Cell): def construct(self, a, b, index, start=None, stop=None, step=None): a[index][start:stop:step]", "test_list_neg_index_1d(): \"\"\" Feature: List index assign Description: Test list assign in pynative mode", "4, 5, 6, 7, 8, 9] b = [11, 22, 33] assert foo(a,", "negative step list slice assign Expectation: No exception. 
\"\"\" a = [1, 2,", "7, 8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b,", "sequence of size 2 to extended slice of size 3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE)", "= [[1], [2, 2], [3, 3, 3]] list_[1][-2] = 20 list_[1][-1] = 21", "1, None, 3)) assert graph_out == python_out def test_list_slice_negative_step(): \"\"\" Feature: List assign", "33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 4, None) assert graph_out == python_out", "assert \"must assign iterable to extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as", "out = net() assert list(out[0]) == [1] assert list(out[1]) == [200, 201] assert", "== [2, 2] assert list(out[2][0]) == [300, 301, 302] def test_list_neg_index_3d(): \"\"\" Feature:", "x, value, sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net = GradNet(net)", "tuple(a) net = NetInner() a = [1, 2, 3, 4, 5, 6, 7,", "= [11, 22, 33, 44, 55] net = Net2() pynative_out = net(a, b,", "b = [11, 22, 33] python_out = compare_func2(a, b, 0, 5) a =", "a = [1, 2, 3, 4, 5, 6, 7] python_out = compare_func2(a, [],", "compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6,", "tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test list assign the size", "python_out = compare_func2(a, b, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "4, 5, 6, 7, 8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out", "is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4]", "1) net = Net2() a = [1, 2, 3, 4, 5, 6, 7,", "test_parameter_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1], [2, 2],", "[3, 3, 3]] list_[-3] = [100] return list_ net = Net() out =", "a[index][start:stop:step] = b return convert_tuple(a) a = [1, 2, [1, 2, 3, 4,", "No exception. \"\"\" a = [1, 2, 3, 4, 5, 6, 7] python_out", "Test list assign in pynative mode Expectation: No exception. \"\"\" class Net(nn.Cell): def", "6, 7, 8, 9] b = [11, 22, 33, 44, 55] pynative_out =", "b = [1111, 2222] pynative_out = convert_tuple(net(a, b, 2, 1, None, 3)) assert", "start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List", "7, 8, 9] b = [33, 44, 55] pynative_out = net(a, b, -1,", "= net(a, b, 0, 0) assert pynative_out == python_out a = [1, 2,", "with pytest.raises(ValueError) as err: net(a, b, -1, -3, -3) assert \"attempt to assign", "exception. 
\"\"\" a = [1, 2, 3, 4, 5, 6, 7] python_out =", "= compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2,", "3, 4, 5, 6, 7, 8, 9] b = [11, 22, 33, 44,", "test_list_slice_length_error(): \"\"\" Feature: List assign Description: Test list assign the size is not", "201] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0])", "context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [100] assert list(out[1]) == [2, 2]", "== [3, 3, 3] def test_list_neg_index_2d(): \"\"\" Feature: List index assign Description: Test", "3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, sens) def test_parameter_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE)", "def test_list_neg_index_1d(): \"\"\" Feature: List index assign Description: Test list assign in pynative", "= com_func3(a, b, 2, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) net = TestNet() a =", "in pynative mode Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_", "tuple() for i in a: if isinstance(i, list): result += (tuple(i),) continue result", "3, 4, 5, 6, 7], 8, 9] b = [1111, 2222] pynative_out =", "== python_out def test_graph_list_slice_assign_extended_number(): \"\"\" Feature: List assign Description: Test negative step list", "assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5, 6,", "net = Net() out = net() assert list(out[0]) == [100] assert list(out[1]) ==", "== [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32]", "\"\"\" Feature: List assign Description: Test list slice start and stop is larger", "[300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert list(out[1])", "to assign sequence of size 3 to extended slice of size 1\" in", "test_list_slice_negetive_error(): \"\"\" Feature: List assign Description: Test negative step list slice assign Expectation:", "ValueError \"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a, b, start1, stop1, step1, start2, stop2, step2):", "\"can only assign an iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List assign", "= net() assert list(out[0]) == [100] assert list(out[1]) == [2, 2] assert list(out[2])", "b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 4, None)", "python_out def test_list_slice_assign(): \"\"\" Feature: List assign Description: Test list slice start and", "== python_out a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "22, 33) python_out = compare_func2(a, b, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "[[1], [2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, input_x):", "net = NetInner() a = [1, 2, 3, 4, 5, 6, 7, 8,", "4, 5, 6, 7, 8, 9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out", "22, 33, 44, 55] python_out = compare_func2(a, b, 0, 1) net = Net2()", "2, [1, 2, 3, 4, 5, 6, 7], 8, 9] b = [1111,", "extended slice of size 3\" in str(err.value) def compare_func2(a, b, start=None, stop=None, step=None):", "list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] =", "[[1], [2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, x,", "== python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list slice 
extend", "List assign Description: Test negative step list slice assign Expectation: No exception. \"\"\"", "equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4] b", "1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7,", "str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert \"must", "b, 2, 1, None, 3)) assert graph_out == python_out def test_list_slice_negative_step(): \"\"\" Feature:", "Net() net(Tensor(0)) def test_list_index_2D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[x,", "net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1],", "python_out = compare_func2(a, b, -12, 456) a = [1, 2, 3, 4, 5,", "22, 33, 44, 55] pynative_out = net(a, b, 0, 0) assert pynative_out ==", "= [100] return list_ net = Net() out = net() assert list(out[0]) ==", "assign the size is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "5) assert graph_out == python_out def test_list_slice_insert(): \"\"\" Feature: List assign Description: Test", "mindspore.ops import operations as P from mindspore import Tensor, ms_function from mindspore import", "= net(a, b, 0, 1) assert graph_out == python_out def test_list_slice_assign(): \"\"\" Feature:", "Test list double slice assign Expectation: ValueError \"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a, b,", "None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None,", "def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] = 300", "def test_list_slice_negative_step(): \"\"\" Feature: List assign Description: Test negative step list slice assign", "b, 0, None, 1, 0, None, 3) == net(a, b, 0, None, 1,", "= [1, 2, 3, 4] b = [5, 6, 7, 8] python_out =", "a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a, b,", "[100] assert list(out[1]) == [2, 2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE)", "1, None, 3)) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = convert_tuple(net(a, b, 2,", "2, 3, 4] b = [5, 6, 7, 8] python_out = compare_func1(a, b,", "assign Expectation: No exception. \"\"\" a = [1, 2, 3, 4, 5, 6]", "list slice assign with tuple Expectation: No exception. 
\"\"\" a = [1, 2,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "[1, 2, 3, 4, 5, 6, 7, 8, 9] b = [11, 22,", "b = [5, 6, 7, 8] net = Net1() pynative_mode_out = net(a, b,", "[1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302] context.set_context(mode=context.GRAPH_MODE)", "8, 9] b = [1111, 2222] pynative_out = convert_tuple(net(a, b, 2, 1, None,", "import pytest import mindspore.nn as nn from mindspore.nn import Cell from mindspore.ops import", "== [200, 201] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_2d(): \"\"\" Feature:", "b = [5, 6, 7, 8] net = Net1() with pytest.raises(ValueError) as err:", "x]] list_[0][0] = 100 return list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE)", "= [11, 22, 33, 44, 55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE)", "22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5, 5) assert graph_out", "0, None, 1, 0, None, 3) def convert_tuple(a): result = tuple() for i", "assert graph_out == python_out def test_list_slice_assign(): \"\"\" Feature: List assign Description: Test list", "def test_parameter_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1], [2,", "2 to extended slice of size 3\" in str(err.value) def compare_func2(a, b, start=None,", "result += (tuple(i),) continue result += (i,) return result def test_list_in_list_slice(): \"\"\" Feature:", "assign Expectation: No exception. \"\"\" class TestNet(Cell): def construct(self, a, b, index, start=None,", "Expectation: No exception. \"\"\" class TestNet(Cell): def construct(self, a, b, index, start=None, stop=None,", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[-3] =", "self.relu(list_value[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens =", "Feature: List assign Description: Test high dimension list slice assign Expectation: No exception.", "= GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3), np.int64))", "1) assert pynative_out == python_out a = [1, 2, 3, 4, 5, 6,", "8, 9] b = [1111, 2222] python_out = com_func3(a, b, 2, 1, None,", "python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "list_value = [[x], [x, x], [[x, x], [x, x]]] list_value[2][0][1] = value return", "foo(a, b, 0, None, 1, 0, None, 3) == net(a, b, 0, None,", "55] pynative_out = net(a, b, -1, -9, -3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE)", "pynative_out = net(a, b, 0, 5) assert pynative_out == python_out a = [1,", "None, 2) assert \"must assign iterable to extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with", "list slice extend Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "b, 1234, 0) a = [1, 2, 3, 4, 5, 6, 7, 8,", "= Net2() pynative_out = net(a, b, -12, 456) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE)", "2) assert \"attempt to assign sequence of size 2 to extended slice of", "Net() out = net() assert list(out[0]) == [1] assert list(out[1]) == [200, 201]", "None, 1, 0, None, 3) def convert_tuple(a): result = tuple() for i in", "See the License for the specific language governing permissions and # limitations under", "err: net(a, b, -1, -3, -3) assert \"attempt to assign sequence of size", "list_ net = Net() out = net() assert list(out[0]) == [100] assert list(out[1])", "from mindspore.ops import composite as C from mindspore.ops import operations as P from", "net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3, -3)", "33] net = Net2() pynative_out = net(a, b, 0, 5) assert pynative_out ==", "def construct(self, x): list_ = [x] list_[0] = 100 return list_ net =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Description: Test negative step list slice assign Expectation: No exception. \"\"\" a =", "= [11, 22, 33] net = Net2() pynative_out = net(a, b, 0, 5)", "list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE)", "assert graph_out == python_out def test_list_double_slice(): \"\"\" Feature: List assign Description: Test list", "list(out[1]) == [20, 21] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out =", "list(out[2]) == [3, 3, 3] def test_list_neg_index_2d(): \"\"\" Feature: List index assign Description:", "larger than size Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "of size 3 to extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with", "return list_ context.set_context(mode=context.PYNATIVE_MODE) net = Net() out = net() assert list(out[0]) == [1]", "assert pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, None, 2) assert", "assign Expectation: ValueError \"\"\" a = [1, 2, 3, 4, 5, 6, 7,", "assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0,", "list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1]", "super(GradNet, self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, value,", "= Net1() with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert \"attempt", "size Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5,", "b, -12, 456) assert graph_out == python_out def test_list_slice_extend(): \"\"\" Feature: List assign", "8, 9] b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "super(GradNet, self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens):", "net = Net1() pynative_mode_out = net(a, b, 0, None, 2) assert pynative_mode_out ==", "== [2, 2] assert list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class", "the size is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List assign Description: Test negative step list", "Net2() pynative_out = net(a, b, 1, None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE)", "\"\"\" Feature: List assign Description: Test list slice shrink assign Expectation: No exception.", "[33, 44, 55] graph_out = net(a, b, -1, -9, -3) assert graph_out ==", "Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]] list_[0][0][0] = 100 return list_", "class TestNet(Cell): def construct(self, a, b, index, start=None, stop=None, step=None): a[index][start:stop:step] = b", "(11, 22, 33) python_out = compare_func2(a, b, 0, 4, None) context.set_context(mode=context.PYNATIVE_MODE) a =", "[], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7]", "None, 1) assert \"can only assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError)", "a, b, start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\"", "= net(a, b, -12, 456) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "graph_out == python_out def test_list_slice_erase(): \"\"\" Feature: List assign Description: Test list slice", "list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE) net", "33] python_out = compare_func2(a, b, 0, 5) a = [1, 2, 3, 4,", "None) assert graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test", "context.set_context(mode=context.GRAPH_MODE) net = Net() out = net() assert list(out[0]) == [100] assert list(out[1])", "net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_", "No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7,", "context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3) assert graph_out == python_out def", "net = Net1() with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert", "\"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2:", "assert list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302] def test_list_neg_index_3d():", "= [1, 2, 3, 4, 5, 6] b = 1 net = Net2()", "net(a, b, 0, 1) assert graph_out == python_out def test_list_slice_assign(): \"\"\" Feature: List", "[[x], [x, x], [[x, x], [x, x]]] list_value[2][0][1] = value return self.relu(list_value[2][0][1]) class", "net(a, b, 0, 4, None) assert graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\" Feature:", "6, 7, 8] net = Net1() pynative_mode_out = net(a, b, 0, None, 2)", "x = Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3), np.int64)) sens =", "[11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0) assert", "7, 8, 9] b = [11, 22, 33] assert foo(a, b, 0, None,", "6, 7, 8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "301, 302] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert list(out[1]) ==", "= Net2() pynative_out = net(a, [], 1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE)", "7] python_out = compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "3, 4, 5, 6, 7, 8, 9] b = [33, 44, 55] net", "[30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ =", "33, 44, 55] pynative_out = net(a, b, 0, 1) assert pynative_out == python_out", "8, 9] b = [11, 22, 33, 44, 55] net = Net2() pynative_out", "construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a,", "b return tuple(a) net = NetInner() a = [1, 2, 3, 4, 5,", "KIND, either express or implied. 
# See the License for the specific language", "\"attempt to assign sequence of size 2 to extended slice of size 3\"", "test_list_slice_extend(): \"\"\" Feature: List assign Description: Test list slice extend Expectation: No exception.", "Net2() pynative_out = net(a, [], 1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a", "[30, 31, 32] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert list(out[1])", "list_ = [[[x, x]]] list_[0][0][0] = 100 return list_ net = Net() net(Tensor(0))", "33) net = Net2() pynative_out = net(a, b, 1, None, 3) assert pynative_out", "5, 6, 7, 8, 9] b = [11, 22, 33] python_out = compare_func2(a,", "\"must assign iterable to extended slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List", "44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 1) assert graph_out == python_out", "31 list_[2][0][-1] = 32 return list_ net = Net() out = net() assert", "mindspore import context def test_list_index_1d(): \"\"\" Feature: List index assign Description: Test list", "0) assert pynative_out == python_out a = [1, 2, 3, 4, 5, 6,", "with pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert \"can only assign", "net(a, b, 0, 5) assert graph_out == python_out def test_list_slice_insert(): \"\"\" Feature: List", "pynative mode Expectation: No exception. \"\"\" class Net(nn.Cell): def construct(self): list_ = [[1],", "tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description: Test list slice shrink assign", "high dimension list slice assign Expectation: No exception. \"\"\" class TestNet(Cell): def construct(self,", "assign in pynative mode Expectation: No exception. \"\"\" class Net(nn.Cell): def construct(self): list_", "graph_out = net(a, b, 0, 0) assert graph_out == python_out def test_list_slice_extend_inner(): \"\"\"", "def convert_tuple(a): result = tuple() for i in a: if isinstance(i, list): result", "2) assert graph_out == python_out def test_list_slice_length_error(): \"\"\" Feature: List assign Description: Test", "5, 6, 7], 8, 9] b = [1111, 2222] pynative_out = convert_tuple(net(a, b,", "3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE)", "ANY KIND, either express or implied. # See the License for the specific", "compare_func2(a, b, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5,", "List assign Description: Test list slice assign with tuple Expectation: No exception. \"\"\"", "4, 5, 6, 7, 8, 9] b = [33, 44, 55] pynative_out =", "0, None, 1) assert \"can only assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with", "2) assert \"must assign iterable to extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError)", "3), np.int64)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell):", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "\"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3,", "step=None): a[start:stop:step] = b return tuple(a) class Net2(Cell): def construct(self, a, b, start=None,", "[11, 22, 33, 44, 55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net", "assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) net = Net() out = net()", "55] python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "\"\"\" a = [1, 2, 3, 4, 5, 6, 7, 8, 9] b", "Net() out = net() assert list(out[0]) == [1] assert list(out[1]) == [20, 21]", "context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert \"must assign", "list_ net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net,", "pynative_out = net(a, b, -12, 456) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out =", "as err: net(a, b, 0, None, 2) assert \"must assign iterable to extended", "2] assert list(out[2]) == [3, 3, 3] def test_list_index_2d(): \"\"\" Feature: List index", "= 301 list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE) net = Net() out =", "9] b = [11, 22, 33, 44, 55] net = Net2() pynative_out =", "context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, None, 2) assert graph_out == python_out def", "return tuple(a) net = NetInner() a = [1, 2, 3, 4, 5, 6,", "out = net() assert list(out[0]) == [100] assert list(out[1]) == [2, 2] assert", "x, value): list_value = [[x], [x, x], [[x, x], [x, x]]] list_value[2][0][1] =", "assert \"attempt to assign sequence of size 2 to extended slice of size", "context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0) assert graph_out == python_out def test_list_slice_extend_front():", "construct(self, x, value): list_value = [[x], [x, x], [[x, x], [x, x]]] list_value[2][0][1]", "= net(a, b, 0, None, 2) assert graph_out == python_out def test_list_slice_length_error(): \"\"\"", "GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3), np.int64)) sens", "= Net() out = net() assert list(out[0]) == [1] assert list(out[1]) == [2,", "assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5,", "6, 7, 8, 9] b = (11, 22, 33) python_out = compare_func2(a, b,", "7, 8, 9] b = [33, 44, 55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with", "net = TestNet() a = [1, 2, [1, 2, 3, 4, 5, 6,", "3)) assert graph_out == python_out def test_list_slice_negative_step(): \"\"\" Feature: List assign Description: Test", "context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert \"can only", "2, 3, 4, 5, 6, 7], 8, 9] b = [1111, 2222] python_out", "55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5, 5) assert graph_out == python_out def", "No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2],", "net() assert list(out[0]) == [1] assert list(out[1]) == [200, 201] assert list(out[2]) ==", "[11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5, 5) assert", "Net1() pynative_mode_out = net(a, b, 0, None, 2) assert pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE)", "55] pynative_out = net(a, b, 1234, 0) assert pynative_out == python_out a =", "2222] pynative_out = convert_tuple(net(a, b, 2, 1, None, 3)) assert pynative_out == python_out", "3) == net(a, b, 0, None, 1, 0, None, 3) def convert_tuple(a): result", "None, 1) assert \"can only assign an iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\"", "3)) grad_net(x, sens) def test_parameter_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value", "-12, 456) assert graph_out == python_out def test_list_slice_extend(): \"\"\" Feature: List assign Description:", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "b = [11, 22, 33, 44, 55] python_out = compare_func2(a, b, -12, 456)", "slice of size 3\" in str(err.value) def compare_func2(a, b, start=None, stop=None, step=None): a[start:stop:step]", "assign Description: Test list slice assign with tuple Expectation: No exception. \"\"\" a", "44, 55] pynative_out = net(a, b, 0, 1) assert pynative_out == python_out a", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def construct(self, a, b, index, start=None, stop=None, step=None): a[index][start:stop:step] = b return tuple(a)", "net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, value, sens): return self.grad_all_with_sens(self.net)(x, value,", "in str(err.value) def compare_func2(a, b, start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a)", "b return tuple(a) class Net2(Cell): def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step]", "as nn from mindspore.nn import Cell from mindspore.ops import composite as C from", "b, 0, 0) assert pynative_out == python_out a = [1, 2, 3, 4,", "= input_x return self.relu(list_x[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net =", "list_[2][0][-2] = 31 list_[2][0][-1] = 32 return list_ net = Net() out =", "applicable law or agreed to in writing, software # distributed under the License", "assign Description: Test list assign the size is equal Expectation: No exception. \"\"\"", "slice insert assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "= Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ =", "construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][0] = 200 list_[1][1]", "= 20 list_[1][-1] = 21 return list_ net = Net() out = net()", "graph_out = net(a, b, 0, 5) assert graph_out == python_out def test_list_slice_insert(): \"\"\"", "insert assign Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "\"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5] b = [5, 6,", "[2, 2] assert list(out[2][0]) == [300, 301, 302] def test_list_neg_index_3d(): \"\"\" Feature: List", "2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] = 302", "context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4] b = [5, 6, 7, 8]", "list_[-3] = [100] return list_ net = Net() out = net() assert list(out[0])", "Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6,", "== python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3) assert graph_out ==", "assign Description: Test list assign the size is not equal Expectation: ValueError. \"\"\"", "8, 9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0,", "2, 3, 4, 5, 6, 7], 8, 9] b = [1111, 2222] pynative_out", "= [33, 44, 55] python_out = compare_func2(a, b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net", "Description: Test list slice erase Expectation: No exception. \"\"\" a = [1, 2,", "to assign sequence of size 2 to extended slice of size 3\" in", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "Feature: List assign Description: Test list slice extend Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE)", "GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)", "list_ = [x] list_[0] = 100 return list_ net = Net() net(Tensor(0)) def", "context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5] b = [5, 6, 7,", "writing, software # distributed under the License is distributed on an \"AS IS\"", "x], [[x, x], [x, x]]] list_value[2][0][1] = value return self.relu(list_value[2][0][1]) class GradNet(nn.Cell): def", "-9, -3) assert graph_out == python_out def test_graph_list_slice_assign_extended_number(): \"\"\" Feature: List assign Description:", "graph_out = net(a, b, 0, None, 2) assert graph_out == python_out def test_list_slice_length_error():", "= [11, 22, 33, 44, 55] pynative_out = net(a, b, 1234, 0) assert", "0, None, 3) def convert_tuple(a): result = tuple() for i in a: if", "[1, 2, 3, 4, 5, 6, 7, 8, 9] b = [33, 44,", "GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3))", "5, 6, 7] python_out = compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE) net = Net() out = net() assert", "str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert \"can", "x): list_ = [[[x, x]]] list_[0][0][0] = 100 return list_ net = Net()", "33) net = Net2() pynative_out = net(a, b, 0, 4, None) assert pynative_out", "assign Description: Test high dimension list slice assign Expectation: No exception. \"\"\" class", "300 list_[2][0][1] = 301 list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE) net = Net()", "test_graph_list_slice_assign_extended_number(): \"\"\" Feature: List assign Description: Test negative step list slice assign Expectation:", "compliance with the License. 
# You may obtain a copy of the License", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][-2] =", "of size 3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, 0,", "net(a, b, 0, None, 2) assert \"must assign iterable to extended slice\" in", "b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7,", "list_[1][0] = 200 list_[1][1] = 201 return list_ net = Net() out =", "step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return a class NetInner(Cell):", "2, 3, 4] b = [5, 6, 7, 8] net = Net1() pynative_mode_out", "pynative_out = net(a, b, 0, 1) assert pynative_out == python_out a = [1,", "= net() assert list(out[0]) == [1] assert list(out[1]) == [200, 201] assert list(out[2])", "construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][-2] = 20 list_[1][-1]", "0, 1) assert graph_out == python_out def test_list_slice_assign(): \"\"\" Feature: List assign Description:", "b, 1, None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b,", "b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature:", "list(out[0]) == [100] assert list(out[1]) == [2, 2] assert list(out[2]) == [3, 3,", "isinstance(i, list): result += (tuple(i),) continue result += (i,) return result def test_list_in_list_slice():", "self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, value, sens):", "def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a) def", "None, 3) def convert_tuple(a): result = tuple() for i in a: if isinstance(i,", "construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1]", "* 3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell): def construct(self, a, b, start=None,", "5, 5) assert pynative_out == python_out a = [1, 2, 3, 4, 5,", "import operations as P from mindspore import Tensor, ms_function from mindspore import context", "3, 3] def test_list_index_2d(): \"\"\" Feature: List index assign Description: Test list assign", "grad_net(x, sens) def test_parameter_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value =", "3, 4, 5, 6, 7, 8, 9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE)", "= [1, 2, 3, 4, 5, 6, 7, 8, 9] b = [11,", "b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 5) assert", "Test list assign the size is not equal Expectation: ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a", "exception. \"\"\" a = [1, 2, 3, 4, 5, 6] b = 1", "stop=None, step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign", "to extended slice of size 3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err:", "2, 3, 4, 5, 6] b = 1 net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with", "assign Description: Test list slice extend Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a =", "test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]] list_[0][0][0] =", "stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return a class", "construct(self, x): list_ = [[x, x]] list_[0][0] = 100 return list_ net =", "44, 55] pynative_out = net(a, b, 0, 0) assert pynative_out == python_out a", "test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list slice extend Expectation: No exception.", "3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, 0, None, 2)", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[0] =", "= [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301", "\"\"\" Feature: List assign Description: Test high dimension list slice assign Expectation: No", "55] python_out = compare_func2(a, b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a", "= net(a, b, -1, -9, -3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a =", "== [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302]", "Feature: List assign Description: Test list slice shrink assign Expectation: No exception. \"\"\"", "list assign in pynative mode Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def", "== [2, 2] assert list(out[2][0]) == [300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out = net()", "== python_out def test_list_double_slice(): \"\"\" Feature: List assign Description: Test list double slice", "(the \"License\"); # you may not use this file except in compliance with", "2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, input_x): list_x =", "= b return a class NetInner(Cell): def construct(self, a, b, start1, stop1, step1,", "shrink assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "[[x, x]] list_[0][0] = 100 return list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter():", "# Unless required by applicable law or agreed to in writing, software #", "exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4] b = [5, 6,", "class NetInner(Cell): def construct(self, a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2:", "by applicable law or agreed to in writing, software # distributed under the", "7, 8, 9] b = [33, 44, 55] graph_out = net(a, b, -1,", "[3, 3, 3]] list_[0] = [100] return list_ net = Net() out =", "class Net(nn.Cell): def construct(self, x): list_ = [x] list_[0] = 100 return list_", "extend Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5,", "test_list_neg_index_2d(): \"\"\" Feature: List index assign Description: Test list assign in pynative mode", "b = [11, 22, 33, 44, 55] pynative_out = net(a, b, 1234, 0)", "33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0) assert graph_out ==", "file except in compliance with the License. 
# You may obtain a copy", "graph_out = net(a, b, -12, 456) assert graph_out == python_out def test_list_slice_extend(): \"\"\"", "== [20, 21] assert list(out[2]) == [3, 3, 3] def test_list_index_3d(): \"\"\" Feature:", "sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell): def construct(self,", "33, 44, 55] net = Net2() pynative_out = net(a, b, -12, 456) assert", "3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert list(out[1]) == [200,", "test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test list slice assign with tuple Expectation:", "construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink():", "assert \"can only assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err:", "class Net2(Cell): def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] = b return", "test_list_index_2D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[x, x]] list_[0][0] =", "b, 2, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) net = TestNet() a = [1, 2,", "out = net() assert list(out[0]) == [1] assert list(out[1]) == [20, 21] assert", "x]]] list_value[2][0][1] = value return self.relu(list_value[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__()", "3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [100] assert list(out[1]) ==", "list slice insert assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "net(a, b, -1, -9, -3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1,", "None, 2) assert \"must assign iterable to extended slice\" in str(err.value) def test_graph_list_slice_assign_number():", "List assign Description: Test high dimension list slice assign Expectation: No exception. \"\"\"", "b = [33, 44, 55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err:", "mindspore.nn import Cell from mindspore.ops import composite as C from mindspore.ops import operations", "context def test_list_index_1d(): \"\"\" Feature: List index assign Description: Test list assign in", "python_out = compare_func2(a, b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a =", "5, 6, 7, 8, 9] b = (11, 22, 33) python_out = compare_func2(a,", "self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, value, sens): return", "= net(a, b, 0, None, 2) assert pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out =", "5, 6, 7, 8, 9] b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE)", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 4, None) assert graph_out ==", "0, 1) net = Net2() a = [1, 2, 3, 4, 5, 6,", "= convert_tuple(net(a, b, 2, 1, None, 3)) assert graph_out == python_out def test_list_slice_negative_step():", "grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3),", "net(a, [], 1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2,", "== python_out def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test list slice assign", "7, 8] net = Net1() pynative_mode_out = net(a, b, 0, None, 2) assert", "[1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302] def", "graph_out == python_out def test_list_double_slice(): \"\"\" Feature: List assign Description: Test list double", "net = Net2() pynative_out = net(a, b, 1, None, 3) assert pynative_out ==", "a: if isinstance(i, list): result += (tuple(i),) continue result += (i,) return result", "self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, value, sens): return self.grad_all_with_sens(self.net)(x, value, sens)", "0, 0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2, 3, 4, 5,", "start=None, stop=None, step=None): a[index][start:stop:step] = b return tuple(a) def com_func3(a, b, index, start=None,", "9] b = [11, 22, 33, 44, 55] net = Net2() python_out =", "Feature: List assign Description: Test list assign the size is not equal Expectation:", "input_x return self.relu(list_x[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net", "def __init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def", "= [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 1)", "to extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err:", "list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x):", "None, 3) == net(a, b, 0, None, 1, 0, None, 3) def convert_tuple(a):", "44, 55] python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "out = net() assert list(out[0]) == [1] assert list(out[1]) == [2, 2] assert", "[1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9] b =", "list(out[0]) == [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31,", "[5, 6, 7, 8] python_out = compare_func1(a, b, 0, None, 2) a =", "6, 7, 8, 9] b = [11, 22, 33] python_out = compare_func2(a, b,", "b = [11, 22, 33, 44, 55] pynative_out = net(a, b, 0, 0)", "List assign Description: Test list assign the size is equal Expectation: No exception.", "python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, None, 2) assert graph_out == python_out", "class Net(nn.Cell): def construct(self, x): list_ = [[[x, x]]] list_[0][0][0] = 100 return", "= Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell): def construct(self, a,", "= [1, 2, 3, 4, 5, 6, 7] net = Net2() pynative_out =", "22, 33, 44, 55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net =", "0) assert graph_out == 
python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test", "= Net2() python_out = compare_func2(a, b, 1234, 0) a = [1, 2, 3,", "4, 5, 6, 7, 8, 9] b = [33, 44, 55] net =", "8, 9] b = [33, 44, 55] python_out = compare_func2(a, b, -1, -9,", "assign Description: Test list assign in pynative mode Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE)", "55] pynative_out = net(a, b, 0, 1) assert pynative_out == python_out a =", "slice start and stop is larger than size Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE)", "= self.value list_x[2][0][1] = input_x return self.relu(list_x[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet,", "5, 6, 7, 8, 9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out =", "b, 0, 1) assert pynative_out == python_out a = [1, 2, 3, 4,", "= [11, 22, 33, 44, 55] python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE)", "9] b = (11, 22, 33) python_out = compare_func2(a, b, 0, 4, None)", "100 return list_ net = Net() net(Tensor(0)) def test_list_index_2D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def", "list slice shrink assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "= [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 0)", "= [[[x, x]]] list_[0][0][0] = 100 return list_ net = Net() net(Tensor(0)) def", "= net(a, b, 1234, 0) assert pynative_out == python_out a = [1, 2,", "return self.relu(list_x[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens", "graph_out = net(a, b, 1234, 0) assert graph_out == python_out def test_list_slice_extend_front(): \"\"\"", "assign iterable to extended slice\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a,", "b, 0, None, 1) assert \"can only assign an iterable\" in str(err.value) context.set_context(mode=context.GRAPH_MODE)", "list assign the size is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a =", "9] b = [11, 22, 33] net = Net2() pynative_out = net(a, b,", "python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5, 6, 7] graph_out =", "7, 8] python_out = compare_func1(a, b, 0, None, 2) a = [1, 2,", "step list slice assign Expectation: No exception. \"\"\" a = [1, 2, 3,", "def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[0] = [100]", "3 to extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as", "[3, 3]]] self.relu = P.ReLU() def construct(self, input_x): list_x = self.value list_x[2][0][1] =", "3, 3]]] list_[2][0][0] = 300 list_[2][0][1] = 301 list_[2][0][2] = 302 return list_", "7] graph_out = net(a, [], 1, 3) assert graph_out == python_out def test_list_slice_tuple_without_step():", "22, 33] assert foo(a, b, 0, None, 1, 0, None, 3) == net(a,", "3, 4, 5, 6, 7, 8, 9] b = [33, 44, 55] python_out", "[2, 2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert", "x = Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x,", "slice erase Expectation: No exception. 
\"\"\" a = [1, 2, 3, 4, 5,", "of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err: net(a, b, -1,", "def compare_func1(a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal():", "graph_out == python_out def test_list_slice_extend_front(): \"\"\" Feature: List assign Description: Test list slice", "Description: Test list slice insert assign Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a =", "9] b = [33, 44, 55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as", "= NetInner() a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "list double slice assign Expectation: ValueError \"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a, b, start1,", "100 return list_ net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def", "No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4] b = [5,", "of size 2 to extended slice of size 3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with", "test_list_slice_negative_step(): \"\"\" Feature: List assign Description: Test negative step list slice assign Expectation:", "self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x,", "b = [33, 44, 55] pynative_out = net(a, b, -1, -9, -3) assert", "<reponame>httpsgithu/mindspore # Copyright 2020-2022 Huawei Technologies Co., Ltd # # Licensed under the", "55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b, -1, -3,", "3], [3, 3]]] self.relu = P.ReLU() def construct(self, x, value): list_value = [[x],", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List assign Description: Test negative step list slice", "list slice assign Expectation: No exception. 
\"\"\" a = [1, 2, 3, 4,", "pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = convert_tuple(net(a, b, 2, 1, None, 3)) assert", "0) a = [1, 2, 3, 4, 5, 6, 7, 8, 9] b", "[[3, 3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2] = 31 list_[2][0][-1] = 32 return", "assert graph_out == python_out def test_list_slice_erase(): \"\"\" Feature: List assign Description: Test list", "6, 7, 8, 9] b = [33, 44, 55] pynative_out = net(a, b,", "def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3] = 30", "21 return list_ net = Net() out = net() assert list(out[0]) == [1]", "test_list_double_slice(): \"\"\" Feature: List assign Description: Test list double slice assign Expectation: ValueError", "[3, 3, 3]] list_[1][0] = 200 list_[1][1] = 201 return list_ net =", "sens_param=True) def construct(self, x, value, sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net()", "grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2 *", "3, 4, 5] b = [5, 6, 7, 8] net = Net1() with", "assert list(out[0]) == [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30,", "return tuple(a) class Net2(Cell): def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] =", "44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0) assert graph_out == python_out", "Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0] =", "Feature: List assign Description: Test negative step list slice assign Expectation: ValueError \"\"\"", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "pytest.raises(ValueError) as err: net(a, b, -1, -3, -3) assert \"attempt to assign sequence", "Expectation: ValueError \"\"\" a = [1, 2, 3, 4, 5, 6, 7, 8,", "[3, 3, 3]] list_[1][-2] = 20 list_[1][-1] = 21 return list_ net =", "assert graph_out == python_out def test_list_slice_extend_front(): \"\"\" Feature: List assign Description: Test list", "= b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test list", "assign Description: Test negative step list slice assign Expectation: No exception. \"\"\" a", "class Net(nn.Cell): def construct(self, x): list_ = [[x, x]] list_[0][0] = 100 return", "Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the", "= 31 list_[2][0][-1] = 32 return list_ net = Net() out = net()", "return self.relu(list_value[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens", "\"\"\" test enumerate\"\"\" import numpy as np import pytest import mindspore.nn as nn", "composite as C from mindspore.ops import operations as P from mindspore import Tensor,", "x): list_ = [[x, x]] list_[0][0] = 100 return list_ net = Net()", "than size Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "stop=None, step=None): a[start:stop:step] = b return tuple(a) class Net2(Cell): def construct(self, a, b,", "python_out def test_list_double_slice(): \"\"\" Feature: List assign Description: Test list double slice assign", "22, 33) python_out = compare_func2(a, b, 0, 4, None) context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert \"must assign iterable to", "55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 0) assert graph_out == python_out def", "in a: if isinstance(i, list): result += (tuple(i),) continue result += (i,) return", "compare_func1(a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\"", "a = [1, 2, 3, 4, 5, 6] b = 1 net =", "-12, 456) a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "[1, 2, 3, 4, 5] b = [5, 6, 7, 8] net =", "list(out[2][0]) == [300, 301, 302] def test_list_neg_index_3d(): \"\"\" Feature: List index assign Description:", "assert list(out[1]) == [20, 21] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out", "list slice assign Expectation: No exception. \"\"\" class TestNet(Cell): def construct(self, a, b,", "7], 8, 9] b = [1111, 2222] pynative_out = convert_tuple(net(a, b, 2, 1,", "the License for the specific language governing permissions and # limitations under the", "mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore", "assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = convert_tuple(net(a, b, 2, 1, None, 3))", "33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 5, 5) assert graph_out ==", "pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, None, 2) assert graph_out", "3, 4, 5, 6, 7, 8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE)", "9] b = [33, 44, 55] pynative_out = net(a, b, -1, -9, -3)", "4, 5, 6, 7] python_out = compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a =", "0, 4, None) assert pynative_out == python_out a = [1, 2, 3, 4,", "__init__(self, net): super(GradNet, self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self,", "net(a, b, 0, None, 2) assert pynative_mode_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a,", "list(out[1]) == [200, 201] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_2d(): \"\"\"", "index, start=None, stop=None, step=None): a[index][start:stop:step] = b return tuple(a) def com_func3(a, b, index,", "= net(a, b, 0, 4, None) assert graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\"", "8, 9] b = [11, 22, 33] python_out = compare_func2(a, b, 0, 5)", "== python_out def test_list_slice_tuple_without_step(): \"\"\" Feature: List assign Description: Test list slice assign", "8, 9] b = (11, 22, 33) python_out = compare_func2(a, b, 0, 4,", "b return a class NetInner(Cell): def construct(self, a, b, start1, stop1, step1, start2,", "def construct(self, x): list_ = [[[x, x]]] list_[0][0][0] = 100 return list_ net", "construct(self, a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] =", "(tuple(i),) continue result += (i,) return result def test_list_in_list_slice(): \"\"\" Feature: List assign", "net = 
Net2() pynative_out = net(a, [], 1, 3) assert pynative_out == python_out", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "assign Description: Test list assign in pynative mode Expectation: No exception. \"\"\" class", "3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2] = 31 list_[2][0][-1] = 32 return list_", "= Net() grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) value =", "Net2() pynative_out = net(a, b, -12, 456) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out", "No exception. \"\"\" a = [1, 2, 3, 4, 5, 6] b =", "[], 1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3,", "NetInner(Cell): def construct(self, a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2:", "b, -1, -9, -3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2,", "step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List assign Description:", "extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(ValueError) as err: net(a,", "= b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description: Test list", "python_out def test_list_slice_extend_front(): \"\"\" Feature: List assign Description: Test list slice extend Expectation:", "graph_out == python_out def test_list_slice_insert(): \"\"\" Feature: List assign Description: Test list slice", "[2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, x, value):", "graph_out == python_out def test_list_slice_length_error(): \"\"\" Feature: List assign Description: Test list assign", "= [5, 6, 7, 8] python_out = compare_func1(a, b, 0, None, 2) a", "3, 4] b = [5, 6, 7, 8] python_out = compare_func1(a, b, 0,", "b, index, start=None, stop=None, step=None): a[index][start:stop:step] = b return convert_tuple(a) a = [1,", "Description: Test list double slice assign Expectation: ValueError \"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a,", "4, 5, 6, 7] graph_out = net(a, [], 1, 3) assert graph_out ==", "def test_list_neg_index_2d(): \"\"\" Feature: List index assign Description: Test list assign in pynative", "def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1], [2,", "[5, 6, 7, 8] net = Net1() with pytest.raises(ValueError) as err: net(a, b,", "9] b = [33, 44, 55] python_out = compare_func2(a, b, -1, -9, -3)", "33, 44, 55] python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1,", "= [11, 22, 33, 44, 55] pynative_out = net(a, b, 0, 1) assert", "[1] assert list(out[1]) == [20, 21] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE)", "== [1] assert list(out[1]) == [20, 21] assert list(out[2]) == [3, 3, 3]", "python_out a = [1, 2, 3, 4, 5, 6, 7, 8, 9] b", "pynative_out = net(a, b, 1, None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out", "x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net = GradNet(net) x =", "22, 33, 44, 55] python_out = compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a =", "Version 2.0 (the \"License\"); # you may not use this file except in", "b = (11, 22, 33) python_out = compare_func2(a, b, 0, 4, 
None) context.set_context(mode=context.PYNATIVE_MODE)", "start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a, b, start=None, stop=None,", "-3) assert graph_out == python_out def test_graph_list_slice_assign_extended_number(): \"\"\" Feature: List assign Description: Test", "= [11, 22, 33, 44, 55] pynative_out = net(a, b, 0, 0) assert", "list_ net = Net() net(Tensor(0)) def test_list_index_2D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x):", "context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 1) assert graph_out == python_out def test_list_slice_assign():", "import composite as C from mindspore.ops import operations as P from mindspore import", "3, 4] b = [5, 6, 7, 8] net = Net1() pynative_mode_out =", "with pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert \"attempt to assign", "3, 3] def test_list_neg_index_2d(): \"\"\" Feature: List index assign Description: Test list assign", "5, 6, 7, 8, 9] b = [11, 22, 33, 44, 55] net", "1, None, 3) assert graph_out == python_out def test_list_double_slice(): \"\"\" Feature: List assign", "3) def convert_tuple(a): result = tuple() for i in a: if isinstance(i, list):", "License. # ============================================================================ \"\"\" test enumerate\"\"\" import numpy as np import pytest import", "Net(nn.Cell): def construct(self, x): list_ = [[x, x]] list_[0][0] = 100 return list_", "b = [11, 22, 33, 44, 55] python_out = compare_func2(a, b, 5, 5)", "self.value = [[1], [2, 2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def", "graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test list slice", "22, 33, 44, 55] net = Net2() pynative_out = net(a, b, -12, 456)", "6, 7] python_out = compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "8] net = Net1() pynative_mode_out = net(a, b, 0, None, 2) assert pynative_mode_out", "* 3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, sens) def test_parameter_list_index_3D_bprop():", "assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, -12, 456) assert graph_out", "3\" in str(err.value) def compare_func2(a, b, start=None, stop=None, step=None): a[start:stop:step] = b return", "return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description: Test list slice shrink", "3, 4, 5, 6, 7, 8, 9] b = [33, 44, 55] pynative_out", "1234, 0) a = [1, 2, 3, 4, 5, 6, 7, 8, 9]", "7, 8, 9] b = (11, 22, 33) python_out = compare_func2(a, b, 0,", "31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [x]", "a = [1, 2, 3, 4, 5] b = [5, 6, 7, 8]", "[[[x, x]]] list_[0][0][0] = 100 return list_ net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop():", "1, 3) assert graph_out == python_out def test_list_slice_tuple_without_step(): \"\"\" Feature: List assign Description:", "def test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test list assign the size is", "b, 1234, 0) assert pynative_out == python_out a = [1, 2, 3, 4,", "\"\"\" Feature: List assign Description: Test list assign the size is not equal", "start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return tuple(a)", "b = [1111, 2222] python_out = com_func3(a, b, 2, 1, None, 3) 
context.set_context(mode=context.PYNATIVE_MODE)", "= [1, 2, 3, 4] b = [5, 6, 7, 8] net =", "6, 7, 8, 9] b = [33, 44, 55] net = Net2() context.set_context(mode=context.PYNATIVE_MODE)", "sequence of size 3 to extended slice of size 1\" in str(err.value) context.set_context(mode=context.GRAPH_MODE)", "3)) grad_net(x, value, sens) class Net1(Cell): def construct(self, a, b, start=None, stop=None, step=None):", "= 1 net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err: net(a, b, 0,", "Description: Test list slice extend Expectation: No exception. \"\"\" a = [1, 2,", "= 200 list_[1][1] = 201 return list_ net = Net() out = net()", "[2, 2], [3, 3, 3]] list_[0] = [100] return list_ net = Net()", "slice of size 3\" in str(err.value) context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(ValueError) as err: net(a, b,", "pynative_out = net(a, b, 1234, 0) assert pynative_out == python_out a = [1,", "result += (i,) return result def test_list_in_list_slice(): \"\"\" Feature: List assign Description: Test", "double slice assign Expectation: ValueError \"\"\" context.set_context(mode=context.PYNATIVE_MODE) @ms_function def foo(a, b, start1, stop1,", "import context def test_list_index_1d(): \"\"\" Feature: List index assign Description: Test list assign", "assert list(out[1]) == [200, 201] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_2d():", "44, 55] graph_out = net(a, b, -1, -9, -3) assert graph_out == python_out", "6, 7, 8, 9] b = [11, 22, 33, 44, 55] net =", "b, 0, 1) net = Net2() a = [1, 2, 3, 4, 5,", "== [2, 2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net()", "start=None, stop=None, step=None): a[index][start:stop:step] = b return convert_tuple(a) a = [1, 2, [1,", "Net() grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2", "assert list(out[1]) == [200, 201] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out", "net(a, b, 0, 4, None) assert pynative_out == python_out a = [1, 2,", "in str(err.value) context.set_context(mode=context.GRAPH_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert", "\"\"\" context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3,", "net(a, b, -12, 456) assert graph_out == python_out def test_list_slice_extend(): \"\"\" Feature: List", "assign sequence of size 3 to extended slice of size 1\" in str(err.value)", "equal Expectation: ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5] b", "list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x):", "step2): a[start1:stop1:step1][start2: stop2: step2] = b return tuple(a) net = NetInner() a =", "7, 8, 9] b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out =", "list(out[1]) == [2, 2] assert list(out[2][0]) == [300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out =", "= 100 return list_ net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell):", "= 201 return list_ net = Net() out = net() assert list(out[0]) ==", "list_ net = Net() out = net() assert list(out[0]) == [1] assert list(out[1])", "slice assign Expectation: No exception. 
\"\"\" a = [1, 2, 3, 4, 5,", "list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] context.set_context(mode=context.GRAPH_MODE) out =", "python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3) assert graph_out == python_out", "4, 5] b = [5, 6, 7, 8] net = Net1() with pytest.raises(ValueError)", "assert graph_out == python_out def test_list_slice_tuple_without_step(): \"\"\" Feature: List assign Description: Test list", "8] python_out = compare_func1(a, b, 0, None, 2) a = [1, 2, 3,", "2] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0])", "net(a, b, -1, -3, -3) assert \"attempt to assign sequence of size 3", "Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0", "self.value list_x[2][0][1] = input_x return self.relu(list_x[2][0][1]) class GradNet(nn.Cell): def __init__(self, net): super(GradNet, self).__init__()", "test_list_index_2d(): \"\"\" Feature: List index assign Description: Test list assign in pynative mode", "a = [1, 2, 3, 4, 5, 6, 7, 8, 9] b =", "b = [11, 22, 33, 44, 55] net = Net2() pynative_out = net(a,", "List assign Description: Test list slice erase Expectation: No exception. \"\"\" a =", "None, 2) a = [1, 2, 3, 4] b = [5, 6, 7,", "context.set_context(mode=context.PYNATIVE_MODE) class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]]", "= b return tuple(a) class Net2(Cell): def construct(self, a, b, start=None, stop=None, step=None):", "a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List assign Description: Test", "1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4,", "3]]] self.relu = P.ReLU() def construct(self, input_x): list_x = self.value list_x[2][0][1] = input_x", "b = [11, 22, 33, 44, 55] pynative_out = net(a, b, 0, 1)", "[11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 5) assert graph_out ==", "= [1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9] b", "net(a, b, 1234, 0) assert graph_out == python_out def test_list_slice_extend_front(): \"\"\" Feature: List", "mindspore.nn as nn from mindspore.nn import Cell from mindspore.ops import composite as C", "5, 6, 7, 8, 9] b = [11, 22, 33] assert foo(a, b,", "0, 5) assert graph_out == python_out def test_list_slice_insert(): \"\"\" Feature: List assign Description:", "numpy as np import pytest import mindspore.nn as nn from mindspore.nn import Cell", "Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [[[x,", "= Net1() pynative_mode_out = net(a, b, 0, None, 2) assert pynative_mode_out == python_out", "OF ANY KIND, either express or implied. 
# See the License for the", "to extended slice of size 3\" in str(err.value) def compare_func2(a, b, start=None, stop=None,", "step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a, b, start=None, stop=None, step=None): a[start:stop:step]", "python_out = compare_func2(a, b, 0, 1) net = Net2() a = [1, 2,", "== python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, None, 2) assert graph_out ==", "iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List assign Description: Test negative step", "list_[0][0] = 100 return list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class", "net = Net2() pynative_out = net(a, b, 0, 4, None) assert pynative_out ==", "list(out[1]) == [2, 2] assert list(out[2]) == [3, 3, 3] def test_list_neg_index_1d(): \"\"\"", "-1, -3, -3) assert \"attempt to assign sequence of size 3 to extended", "b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return", "9] b = (11, 22, 33) python_out = compare_func2(a, b, 1, None, 3)", "3]] list_[0] = [100] return list_ net = Net() out = net() assert", "= 100 return list_ net = Net() net(Tensor(0)) def test_list_index_3D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell):", "8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0,", "net() assert list(out[0]) == [1] assert list(out[1]) == [20, 21] assert list(out[2]) ==", "class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1], [2, 2], [[3, 3],", "import numpy as np import pytest import mindspore.nn as nn from mindspore.nn import", "as np import pytest import mindspore.nn as nn from mindspore.nn import Cell from", "= net(a, b, 0, 5) assert graph_out == python_out def test_list_slice_insert(): \"\"\" Feature:", "0, 4, None) assert graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign", "assert list(out[0]) == [1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [300,", "is not equal Expectation: ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4,", "__init__(self): super(Net, self).__init__() self.value = [[1], [2, 2], [[3, 3], [3, 3]]] self.relu", "compare_func1(a, b, 0, None, 2) a = [1, 2, 3, 4] b =", "pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5, 6, 7]", "== [300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out = net() assert list(out[0]) == [1] assert", "construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2]", "0, None, 2) assert \"attempt to assign sequence of size 2 to extended", "5, 6] b = 1 net = Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err:", "Test list slice extend Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "[100] return list_ net = Net() out = net() assert list(out[0]) == [100]", "for i in a: if isinstance(i, list): result += (tuple(i),) continue result +=", "== python_out def test_list_slice_assign(): \"\"\" Feature: List assign Description: Test list slice start", "def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[-3] = [100]", "5, 6, 7, 8, 9] b = [11, 22, 33, 44, 55] pynative_out", "context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert \"can only", "\"\"\" class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]]", "pynative_out = convert_tuple(net(a, b, 2, 1, None, 3)) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE)", "21] assert list(out[2]) == [3, 3, 3] def test_list_index_3d(): \"\"\" Feature: List index", "def test_list_neg_index_3d(): \"\"\" Feature: List index assign Description: Test list assign in pynative", "assert list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self,", "or agreed to in writing, software # distributed under the License is distributed", "2] assert list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def", "python_out def test_list_slice_erase(): \"\"\" Feature: List assign Description: Test list slice erase Expectation:", "import Tensor, ms_function from mindspore import context def test_list_index_1d(): \"\"\" Feature: List index", "2) assert \"must assign iterable to extended slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\"", "a[start:stop:step] = b[start:stop:step] return tuple(a) def compare_func1(a, b, start=None, stop=None, step=None): a[start:stop:step] =", "assert list(out[0]) == [1] assert list(out[1]) == [200, 201] assert list(out[2]) == [3,", "33] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 5) assert graph_out == python_out def", "Feature: List index assign Description: Test list assign in pynative mode Expectation: No", "No exception. \"\"\" class TestNet(Cell): def construct(self, a, b, index, start=None, stop=None, step=None):", "= Tensor(np.arange(2 * 3).reshape(2, 3)) value = Tensor(np.ones((2, 3), np.int64)) sens = Tensor(np.arange(2", "value, sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net = GradNet(net) x", "net(a, b, 5, 5) assert graph_out == python_out def test_list_slice_erase(): \"\"\" Feature: List", "python_out def test_list_slice_insert(): \"\"\" Feature: List assign Description: Test list slice insert assign", "class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [3, 3, 3]] list_[1][-2]", "= [[x, x]] list_[0][0] = 100 return list_ net = Net() net(Tensor(0)) def", "Tensor(np.arange(2 * 3).reshape(2, 3)) sens = Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, sens) def", "list slice start and stop is larger than size Expectation: No exception. 
\"\"\"", "3, 4, 5, 6, 7, 8, 9] b = [11, 22, 33] net", "5, 6, 7, 8, 9] b = [11, 22, 33] context.set_context(mode=context.GRAPH_MODE) graph_out =", "Tensor(np.arange(2 * 3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell): def construct(self, a, b,", "[1, 2, 3, 4, 5, 6, 7] python_out = compare_func2(a, [], 1, 3)", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "to extended slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List assign Description: Test", "def construct(self, x, value, sens): return self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net", "assert graph_out == python_out def test_list_slice_tuple_with_step(): \"\"\" Feature: List assign Description: Test list", "6, 7, 8, 9] b = (11, 22, 33) net = Net2() pynative_out", "list slice assign Expectation: ValueError \"\"\" a = [1, 2, 3, 4, 5,", "License. # You may obtain a copy of the License at # #", "pynative_out = net(a, b, 0, 0) assert pynative_out == python_out a = [1,", "in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List assign Description: Test negative step list", "= net(a, b, 0, 0) assert graph_out == python_out def test_list_slice_extend_inner(): \"\"\" Feature:", "= compare_func2(a, b, 5, 5) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5,", "return a class NetInner(Cell): def construct(self, a, b, start1, stop1, step1, start2, stop2,", "(11, 22, 33) net = Net2() pynative_out = net(a, b, 1, None, 3)", "[1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] def", "graph_out == python_out def test_list_slice_tuple_without_step(): \"\"\" Feature: List assign Description: Test list slice", "b, 0, None, 2) a = [1, 2, 3, 4] b = [5,", "a[start1:stop1:step1][start2: stop2: step2] = b return tuple(a) net = NetInner() a = [1,", "b = [11, 22, 33, 44, 55] python_out = compare_func2(a, b, 0, 0)", "b = [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0,", "-3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a = [1, 2, 3, 4, 5,", "= Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert", "1, None, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "= [1, 2, 3, 4, 5, 6, 7, 8, 9] b = [33,", "def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list slice extend Expectation: No", "compare_func2(a, b, 0, 5) a = [1, 2, 3, 4, 5, 6, 7,", "2], [[3, 3], [3, 3]]] self.relu = P.ReLU() def construct(self, x, value): list_value", "assign the size is not equal Expectation: ValueError. 
\"\"\" context.set_context(mode=context.GRAPH_MODE) a = [1,", "test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [x] list_[0] = 100", "3] def test_list_neg_index_1d(): \"\"\" Feature: List index assign Description: Test list assign in", "b, 0, None, 2) assert \"attempt to assign sequence of size 2 to", "[2, 2] assert list(out[2][0]) == [300, 301, 302] context.set_context(mode=context.GRAPH_MODE) out = net() assert", "b, -1, -3, -3) assert \"attempt to assign sequence of size 3 to", "None, 1, 0, None, 3) == net(a, b, 0, None, 1, 0, None,", "2, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) net = TestNet() a = [1, 2, [1,", "[11, 22, 33, 44, 55] net = Net2() pynative_out = net(a, b, -12,", "list_[0][0][0] = 100 return list_ net = Net() net(Tensor(0)) def test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class", "start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return a", "test_const_list_index_3D_bprop(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.value = [[1], [2, 2],", "list assign the size is not equal Expectation: ValueError. \"\"\" context.set_context(mode=context.GRAPH_MODE) a =", "def foo(a, b, start1, stop1, step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] =", "44, 55] pynative_out = net(a, b, 1234, 0) assert pynative_out == python_out a", "License, Version 2.0 (the \"License\"); # you may not use this file except", "as err: net(a, b, 0, None, 1) assert \"can only assign an iterable\"", "55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a =", "= 30 list_[2][0][-2] = 31 list_[2][0][-1] = 32 return list_ net = Net()", "9] b = (11, 22, 33) context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 0, 4,", "[1111, 2222] python_out = com_func3(a, b, 2, 1, None, 3) context.set_context(mode=context.PYNATIVE_MODE) net =", "grad_net(x, value, sens) class Net1(Cell): def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step]", "55] net = Net2() python_out = compare_func2(a, b, 1234, 0) a = [1,", "slice extend Expectation: No exception. 
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "33) python_out = compare_func2(a, b, 0, 4, None) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2,", "def test_list_index_2d(): \"\"\" Feature: List index assign Description: Test list assign in pynative", "[11, 22, 33, 44, 55] pynative_out = net(a, b, 0, 1) assert pynative_out", "[2, 2] assert list(out[2][0]) == [30, 31, 32] context.set_context(mode=context.GRAPH_MODE) out = net() assert", "3).reshape(2, 3)) grad_net(x, value, sens) class Net1(Cell): def construct(self, a, b, start=None, stop=None,", "step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature: List assign Description:", "convert_tuple(net(a, b, 2, 1, None, 3)) assert graph_out == python_out def test_list_slice_negative_step(): \"\"\"", "test_list_slice_insert(): \"\"\" Feature: List assign Description: Test list slice insert assign Expectation: No", "== [3, 3, 3] def test_list_index_2d(): \"\"\" Feature: List index assign Description: Test", "33, 44, 55] python_out = compare_func2(a, b, 0, 0) context.set_context(mode=context.PYNATIVE_MODE) net = Net2()", "com_func3(a, b, index, start=None, stop=None, step=None): a[index][start:stop:step] = b return convert_tuple(a) a =", "stop2: step2] = b return a class NetInner(Cell): def construct(self, a, b, start1,", "self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net = GradNet(net) x = Tensor(np.arange(2 * 3).reshape(2,", "pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1, None, 3) assert graph_out", "extend Expectation: No exception. \"\"\" a = [1, 2, 3, 4, 5, 6,", "5, 6, 7], 8, 9] b = [1111, 2222] python_out = com_func3(a, b,", "specific language governing permissions and # limitations under the License. # ============================================================================ \"\"\"", "32] def test_list_index_1D_parameter(): context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [x] list_[0]", "6, 7], 8, 9] b = [1111, 2222] python_out = com_func3(a, b, 2,", "extended slice\" in str(err.value) def test_graph_list_slice_assign_number(): \"\"\" Feature: List assign Description: Test negative", "200 list_[1][1] = 201 return list_ net = Net() out = net() assert", "assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] context.set_context(mode=context.GRAPH_MODE) out", "python_out def test_list_slice_extend_inner(): \"\"\" Feature: List assign Description: Test list slice extend Expectation:", "= Net2() context.set_context(mode=context.PYNATIVE_MODE) with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert", "[3, 3, 3] def test_list_index_3d(): \"\"\" Feature: List index assign Description: Test list", "Feature: List assign Description: Test list slice insert assign Expectation: No exception. \"\"\"", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "python_out = compare_func2(a, [], 1, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4,", "step2] = b return tuple(a) net = NetInner() a = [1, 2, 3,", "Test list slice extend Expectation: No exception. 
\"\"\" a = [1, 2, 3,", "list_[2][0][1] = 301 list_[2][0][2] = 302 return list_ context.set_context(mode=context.PYNATIVE_MODE) net = Net() out", "def construct(self, a, b, start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def", "b, start=None, stop=None, step=None): a[start:stop:step] = b return tuple(a) def test_list_slice_shrink(): \"\"\" Feature:", "pynative_out = net(a, [], 1, 3) assert pynative_out == python_out context.set_context(mode=context.GRAPH_MODE) a =", "stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return a class NetInner(Cell): def construct(self,", "= [11, 22, 33, 44, 55] context.set_context(mode=context.GRAPH_MODE) graph_out = net(a, b, 1234, 0)", "test_list_index_3d(): \"\"\" Feature: List index assign Description: Test list assign in pynative mode", "def construct(self, x): list_ = [[x, x]] list_[0][0] = 100 return list_ net", "None, 3)) assert graph_out == python_out def test_list_slice_negative_step(): \"\"\" Feature: List assign Description:", "6, 7, 8] python_out = compare_func1(a, b, 0, None, 2) a = [1,", "from mindspore.ops import operations as P from mindspore import Tensor, ms_function from mindspore", "test_list_slice_erase(): \"\"\" Feature: List assign Description: Test list slice erase Expectation: No exception.", "1, 0, None, 3) def convert_tuple(a): result = tuple() for i in a:", "\"\"\" Feature: List assign Description: Test list double slice assign Expectation: ValueError \"\"\"", "with pytest.raises(TypeError) as err: net(a, b, 0, None, 2) assert \"must assign iterable", "None, 3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7, 8,", "pytest.raises(TypeError) as err: net(a, b, 0, None, 1) assert \"can only assign an", "size is equal Expectation: No exception. \"\"\" context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3,", "44, 55] python_out = compare_func2(a, b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net = Net2()", "context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def construct(self, x): list_ = [x] list_[0] = 100 return", "self.grad_all_with_sens(self.net)(x, value, sens) net = Net() grad_net = GradNet(net) x = Tensor(np.arange(2 *", "self).__init__() self.net = net self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens): return", "start=None, stop=None, step=None): a[start:stop:step] = b[start:stop:step] return tuple(a) def test_list_slice_length_equal(): \"\"\" Feature: List", "assert \"can only assign an iterable\" in str(err.value) def test_list_slice_negetive_error(): \"\"\" Feature: List", "3, 4, 5, 6, 7], 8, 9] b = [1111, 2222] python_out =", "= [5, 6, 7, 8] net = Net1() with pytest.raises(ValueError) as err: net(a,", "[1] assert list(out[1]) == [200, 201] assert list(out[2]) == [3, 3, 3] context.set_context(mode=context.GRAPH_MODE)", "7] net = Net2() pynative_out = net(a, [], 1, 3) assert pynative_out ==", "[1] assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] context.set_context(mode=context.GRAPH_MODE)", "3) context.set_context(mode=context.PYNATIVE_MODE) a = [1, 2, 3, 4, 5, 6, 7] net =", "or implied. 
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test list index and slice assign"""
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore.nn import Cell
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore import Tensor, ms_function
from mindspore import context


def test_list_index_1d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [3, 3, 3]]
            list_[0] = [100]
            return list_

    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net()
    out = net()
    assert list(out[0]) == [100]
    assert list(out[1]) == [2, 2]
    assert list(out[2]) == [3, 3, 3]
    context.set_context(mode=context.GRAPH_MODE)
    net = Net()
    out = net()
    assert list(out[0]) == [100]
    assert list(out[1]) == [2, 2]
    assert list(out[2]) == [3, 3, 3]


def test_list_neg_index_1d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)

    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [3, 3, 3]]
            list_[-3] = [100]
            return list_

    net = Net()
    out = net()
    assert list(out[0]) == [100]
    assert list(out[1]) == [2, 2]
    assert list(out[2]) == [3, 3, 3]
    context.set_context(mode=context.GRAPH_MODE)
    out = net()
    assert list(out[0]) == [100]
    assert list(out[1]) == [2, 2]
    assert list(out[2]) == [3, 3, 3]


def test_list_index_2d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [3, 3, 3]]
            list_[1][0] = 200
            list_[1][1] = 201
            return list_

    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net()
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [200, 201]
    assert list(out[2]) == [3, 3, 3]
    context.set_context(mode=context.GRAPH_MODE)
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [200, 201]
    assert list(out[2]) == [3, 3, 3]


def test_list_neg_index_2d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)

    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [3, 3, 3]]
            list_[1][-2] = 20
            list_[1][-1] = 21
            return list_

    net = Net()
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [20, 21]
    assert list(out[2]) == [3, 3, 3]
    context.set_context(mode=context.GRAPH_MODE)
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [20, 21]
    assert list(out[2]) == [3, 3, 3]


def test_list_index_3d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [[3, 3, 3]]]
            list_[2][0][0] = 300
            list_[2][0][1] = 301
            list_[2][0][2] = 302
            return list_

    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net()
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [2, 2]
    assert list(out[2][0]) == [300, 301, 302]
    context.set_context(mode=context.GRAPH_MODE)
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [2, 2]
    assert list(out[2][0]) == [300, 301, 302]


def test_list_neg_index_3d():
    """
    Feature: List index assign
    Description: Test list assign in pynative mode
    Expectation: No exception.
    """
    class Net(nn.Cell):
        def construct(self):
            list_ = [[1], [2, 2], [[3, 3, 3]]]
            list_[2][0][-3] = 30
            list_[2][0][-2] = 31
            list_[2][0][-1] = 32
            return list_

    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net()
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [2, 2]
    assert list(out[2][0]) == [30, 31, 32]
    context.set_context(mode=context.GRAPH_MODE)
    out = net()
    assert list(out[0]) == [1]
    assert list(out[1]) == [2, 2]
    assert list(out[2][0]) == [30, 31, 32]


def test_list_index_1D_parameter():
    context.set_context(mode=context.GRAPH_MODE)

    class Net(nn.Cell):
        def construct(self, x):
            list_ = [x]
            list_[0] = 100
            return list_

    net = Net()
    net(Tensor(0))


def test_list_index_2D_parameter():
    context.set_context(mode=context.GRAPH_MODE)

    class Net(nn.Cell):
        def construct(self, x):
            list_ = [[x, x]]
            list_[0][0] = 100
            return list_

    net = Net()
    net(Tensor(0))


def test_list_index_3D_parameter():
    context.set_context(mode=context.GRAPH_MODE)

    class Net(nn.Cell):
        def construct(self, x):
            list_ = [[[x, x]]]
            list_[0][0][0] = 100
            return list_

    net = Net()
    net(Tensor(0))


def test_const_list_index_3D_bprop():
    context.set_context(mode=context.GRAPH_MODE)

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.value = [[1], [2, 2], [[3, 3], [3, 3]]]
            self.relu = P.ReLU()

        def construct(self, input_x):
            list_x = self.value
            list_x[2][0][1] = input_x
            return self.relu(list_x[2][0][1])

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

        def construct(self, x, sens):
            return self.grad_all_with_sens(self.net)(x, sens)

    net = Net()
    grad_net = GradNet(net)
    x = Tensor(np.arange(2 * 3).reshape(2, 3))
    sens = Tensor(np.arange(2 * 3).reshape(2, 3))
    grad_net(x, sens)


def test_parameter_list_index_3D_bprop():
    context.set_context(mode=context.GRAPH_MODE)

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.value = [[1], [2, 2], [[3, 3], [3, 3]]]
            self.relu = P.ReLU()

        def construct(self, x, value):
            list_value = [[x], [x, x], [[x, x], [x, x]]]
            list_value[2][0][1] = value
            return self.relu(list_value[2][0][1])

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net
            self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

        def construct(self, x, value, sens):
            return self.grad_all_with_sens(self.net)(x, value, sens)

    net = Net()
    grad_net = GradNet(net)
    x = Tensor(np.arange(2 * 3).reshape(2, 3))
    value = Tensor(np.ones((2, 3), np.int64))
    sens = Tensor(np.arange(2 * 3).reshape(2, 3))
    grad_net(x, value, sens)


class Net1(Cell):
    def construct(self, a, b, start=None, stop=None, step=None):
        a[start:stop:step] = b[start:stop:step]
        return tuple(a)


def compare_func1(a, b, start=None, stop=None, step=None):
    a[start:stop:step] = b[start:stop:step]
    return tuple(a)


def test_list_slice_length_equal():
    """
    Feature: List assign
    Description: Test list assign the size is equal
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4]
    b = [5, 6, 7, 8]
    python_out = compare_func1(a, b, 0, None, 2)
    a = [1, 2, 3, 4]
    b = [5, 6, 7, 8]
    net = Net1()
    pynative_mode_out = net(a, b, 0, None, 2)
    assert pynative_mode_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 0, None, 2)
    assert graph_out == python_out


def test_list_slice_length_error():
    """
    Feature: List assign
    Description: Test list assign the size is not equal
    Expectation: ValueError.
    """
    context.set_context(mode=context.GRAPH_MODE)
    a = [1, 2, 3, 4, 5]
    b = [5, 6, 7, 8]
    net = Net1()
    with pytest.raises(ValueError) as err:
        net(a, b, 0, None, 2)
    assert "attempt to assign sequence of size 2 to extended slice of size 3" in str(err.value)


def compare_func2(a, b, start=None, stop=None, step=None):
    a[start:stop:step] = b
    return tuple(a)


class Net2(Cell):
    def construct(self, a, b, start=None, stop=None, step=None):
        a[start:stop:step] = b
        return tuple(a)
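

# The length tests above (test_list_slice_length_equal / test_list_slice_length_error) rely on
# plain CPython slice-assignment semantics: an extended slice (one with an explicit step) only
# accepts a right-hand side of exactly the same length. A minimal plain-Python sketch of that
# rule is kept below for reference; check_extended_slice_length is an illustrative helper name
# and is not part of the original test suite.
def check_extended_slice_length():
    a = [1, 2, 3, 4, 5]
    a[0:None:2] = [10, 20, 30]      # extended slice of size 3 accepts exactly 3 items
    assert a == [10, 2, 20, 4, 30]
    with pytest.raises(ValueError, match="attempt to assign sequence of size 2"):
        a[0:None:2] = [10, 20]      # a size mismatch raises ValueError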


def test_list_slice_shrink():
    """
    Feature: List assign
    Description: Test list slice shrink assign
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33]
    python_out = compare_func2(a, b, 0, 5)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33]
    net = Net2()
    pynative_out = net(a, b, 0, 5)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33]
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 0, 5)
    assert graph_out == python_out


def test_list_slice_insert():
    """
    Feature: List assign
    Description: Test list slice insert assign
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    python_out = compare_func2(a, b, 0, 1)
    net = Net2()
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    pynative_out = net(a, b, 0, 1)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 0, 1)
    assert graph_out == python_out


def test_list_slice_assign():
    """
    Feature: List assign
    Description: Test list slice start and stop is larger than size
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    python_out = compare_func2(a, b, -12, 456)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    net = Net2()
    pynative_out = net(a, b, -12, 456)
    assert pynative_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, -12, 456)
    assert graph_out == python_out


def test_list_slice_extend():
    """
    Feature: List assign
    Description: Test list slice extend
    Expectation: No exception.
    """
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    net = Net2()
    python_out = compare_func2(a, b, 1234, 0)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    pynative_out = net(a, b, 1234, 0)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 1234, 0)
    assert graph_out == python_out


def test_list_slice_extend_front():
    """
    Feature: List assign
    Description: Test list slice extend
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    python_out = compare_func2(a, b, 0, 0)
    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net2()
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    pynative_out = net(a, b, 0, 0)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 0, 0)
    assert graph_out == python_out


def test_list_slice_extend_inner():
    """
    Feature: List assign
    Description: Test list slice extend
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    python_out = compare_func2(a, b, 5, 5)
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    net = Net2()
    pynative_out = net(a, b, 5, 5)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33, 44, 55]
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 5, 5)
    assert graph_out == python_out


def test_list_slice_erase():
    """
    Feature: List assign
    Description: Test list slice erase
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7]
    python_out = compare_func2(a, [], 1, 3)
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7]
    net = Net2()
    pynative_out = net(a, [], 1, 3)
    assert pynative_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    a = [1, 2, 3, 4, 5, 6, 7]
    graph_out = net(a, [], 1, 3)
    assert graph_out == python_out


def test_list_slice_tuple_without_step():
    """
    Feature: List assign
    Description: Test list slice assign with tuple
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    python_out = compare_func2(a, b, 0, 4, None)
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    net = Net2()
    pynative_out = net(a, b, 0, 4, None)
    assert pynative_out == python_out
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = net(a, b, 0, 4, None)
    assert graph_out == python_out


def test_list_slice_tuple_with_step():
    """
    Feature: List assign
    Description: Test list slice assign with tuple
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    python_out = compare_func2(a, b, 1, None, 3)
    context.set_context(mode=context.PYNATIVE_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    net = Net2()
    pynative_out = net(a, b, 1, None, 3)
    assert pynative_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = (11, 22, 33)
    graph_out = net(a, b, 1, None, 3)
    assert graph_out == python_out


def test_list_double_slice():
    """
    Feature: List assign
    Description: Test list double slice assign
    Expectation: ValueError
    """
    context.set_context(mode=context.PYNATIVE_MODE)

    @ms_function
    def foo(a, b, start1, stop1, step1, start2, stop2, step2):
        a[start1:stop1:step1][start2: stop2: step2] = b
        return a

    class NetInner(Cell):
        def construct(self, a, b, start1, stop1, step1, start2, stop2, step2):
            a[start1:stop1:step1][start2: stop2: step2] = b
            return tuple(a)

    net = NetInner()
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [11, 22, 33]
    assert foo(a, b, 0, None, 1, 0, None, 3) == net(a, b, 0, None, 1, 0, None, 3)


def convert_tuple(a):
    result = tuple()
    for i in a:
        if isinstance(i, list):
            result += (tuple(i),)
            continue
        result += (i,)
    return result


def test_list_in_list_slice():
    """
    Feature: List assign
    Description: Test high dimension list slice assign
    Expectation: No exception.
    """
    class TestNet(Cell):
        def construct(self, a, b, index, start=None, stop=None, step=None):
            a[index][start:stop:step] = b
            return tuple(a)

    def com_func3(a, b, index, start=None, stop=None, step=None):
        a[index][start:stop:step] = b
        return convert_tuple(a)

    a = [1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9]
    b = [1111, 2222]
    python_out = com_func3(a, b, 2, 1, None, 3)
    context.set_context(mode=context.PYNATIVE_MODE)
    net = TestNet()
    a = [1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9]
    b = [1111, 2222]
    pynative_out = convert_tuple(net(a, b, 2, 1, None, 3))
    assert pynative_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    graph_out = convert_tuple(net(a, b, 2, 1, None, 3))
    assert graph_out == python_out


def test_list_slice_negative_step():
    """
    Feature: List assign
    Description: Test negative step list slice assign
    Expectation: No exception.
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [33, 44, 55]
    python_out = compare_func2(a, b, -1, -9, -3)
    context.set_context(mode=context.PYNATIVE_MODE)
    net = Net2()
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [33, 44, 55]
    pynative_out = net(a, b, -1, -9, -3)
    assert pynative_out == python_out
    context.set_context(mode=context.GRAPH_MODE)
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [33, 44, 55]
    graph_out = net(a, b, -1, -9, -3)
    assert graph_out == python_out


def test_graph_list_slice_assign_extended_number():
    """
    Feature: List assign
    Description: Test assigning a number to an extended list slice
    Expectation: TypeError
    """
    a = [1, 2, 3, 4, 5, 6]
    b = 1
    net = Net2()
    context.set_context(mode=context.PYNATIVE_MODE)
    with pytest.raises(TypeError) as err:
        net(a, b, 0, None, 2)
    assert "must assign iterable to extended slice" in str(err.value)
    context.set_context(mode=context.GRAPH_MODE)
    with pytest.raises(TypeError) as err:
        net(a, b, 0, None, 2)
    assert "must assign iterable to extended slice" in str(err.value)


def test_graph_list_slice_assign_number():
    """
    Feature: List assign
    Description: Test assigning a number to a list slice
    Expectation: TypeError
    """
    a = [1, 2, 3, 4, 5, 6]
    b = 1
    net = Net2()
    context.set_context(mode=context.PYNATIVE_MODE)
    with pytest.raises(TypeError) as err:
        net(a, b, 0, None, 1)
    assert "can only assign an iterable" in str(err.value)
    context.set_context(mode=context.GRAPH_MODE)
    with pytest.raises(TypeError) as err:
        net(a, b, 0, None, 1)
    assert "can only assign an iterable" in str(err.value)


def test_list_slice_negetive_error():
    """
    Feature: List assign
    Description: Test negative step list slice assign with mismatched sizes
    Expectation: ValueError
    """
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    b = [33, 44, 55]
    net = Net2()
    context.set_context(mode=context.PYNATIVE_MODE)
    with pytest.raises(ValueError) as err:
        net(a, b, -1, -3, -3)
    assert "attempt to assign sequence of size 3 to extended slice of size 1" in str(err.value)
    context.set_context(mode=context.GRAPH_MODE)
    with pytest.raises(ValueError) as err:
        net(a, b, -1, -3, -3)
    assert "attempt to assign sequence of size 3 to extended slice of size 1" in str(err.value)
\"\"\" context.set_context(mode=context.PYNATIVE_MODE) class", "b, -1, -9, -3) context.set_context(mode=context.PYNATIVE_MODE) net = Net2() a = [1, 2, 3,", "Ltd # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "7, 8, 9] b = [11, 22, 33, 44, 55] pynative_out = net(a,", "0, None, 1, 0, None, 3) == net(a, b, 0, None, 1, 0,", "net = Net2() pynative_out = net(a, b, 0, 5) assert pynative_out == python_out", "step1, start2, stop2, step2): a[start1:stop1:step1][start2: stop2: step2] = b return tuple(a) net =", "C.GradOperation(get_all=True, sens_param=True) def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net", "def construct(self, x, sens): return self.grad_all_with_sens(self.net)(x, sens) net = Net() grad_net = GradNet(net)", "class Net(nn.Cell): def construct(self): list_ = [[1], [2, 2], [[3, 3, 3]]] list_[2][0][0]", "b = (11, 22, 33) net = Net2() pynative_out = net(a, b, 1,", "assert list(out[1]) == [2, 2] assert list(out[2][0]) == [30, 31, 32] def test_list_index_1D_parameter():", "[[1], [2, 2], [[3, 3, 3]]] list_[2][0][-3] = 30 list_[2][0][-2] = 31 list_[2][0][-1]", "pytest.raises(ValueError) as err: net(a, b, 0, None, 2) assert \"attempt to assign sequence" ]
[ "tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id,", "template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html',", "import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import", "import render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import func", "in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def", "MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent:", "db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json')", "Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def", "style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles)", "= Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def", "from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\")", "new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count", "'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles =", "def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json')", "cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12)", "[] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName':", "db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 
'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt():", "Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json():", "redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import func from guitarfan.models import", "jsonify, current_app from sqlalchemy import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import", "coding: utf-8 -*- from flask import render_template, request, redirect, url_for, flash, Blueprint, jsonify,", "key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId':", "render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id,", "flask import render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import", "#!/usr/bin/env python # -*- coding: utf-8 -*- from flask import render_template, request, redirect,", "flash, Blueprint, jsonify, current_app from sqlalchemy import func from guitarfan.models import * from", "return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10", "@bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10 Disallow: /admin", "import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs =", "guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index =", "tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = []", "from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index():", "from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index", "styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html>", "tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return", "key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name,", "tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt')", "-*- from flask import render_template, request, 
redirect, url_for, flash, Blueprint, jsonify, current_app from", "<gh_stars>10-100 #!/usr/bin/env python # -*- coding: utf-8 -*- from flask import render_template, request,", "return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10 Disallow: /admin </pre> </body> </html>\"\"\"", "from sqlalchemy import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from", "= Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs =", "@cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count in db.session.query(Tag.id,", "robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10 Disallow: /admin </pre> </body>", "db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def", "[] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count})", "# -*- coding: utf-8 -*- from flask import render_template, request, redirect, url_for, flash,", "Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12)", "-*- coding: utf-8 -*- from flask import render_template, request, redirect, url_for, flash, Blueprint,", "import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index',", "for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return", "index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600,", "@cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id):", "Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600,", "jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10 Disallow:", "func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return", "bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs", "hot_tabs=hot_tabs, 
new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id, tag_name,", "'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for", "@bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs)", "for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name,", "func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json')", "Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = []", "new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags", "current_app from sqlalchemy import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db", "tags = [] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId':", "'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body>", "render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import func from", "hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json')", "in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags)", "= [] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id,", "utf-8 -*- from flask import render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app", "* from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__,", "def style_cloud_json(): styles = [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id,", "tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id):", "Blueprint, jsonify, current_app from sqlalchemy import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy", "__name__, 
template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return", "func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache", "@bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id, tab_count in db.session.query(Tab.style_id,", "from flask import render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy", "@bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12) new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs,", "style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head>", "sqlalchemy import func from guitarfan.models import * from guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache", "def tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab,", "return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for", "import db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index')", "@bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags = [] for tag_id, tag_name, tab_count in", "'count': tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: *", "= [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count':", "request, redirect, url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import func from guitarfan.models", "return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id, tab_count", "def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay: 10 Disallow: /admin </pre>", "guitarfan.extensions.flasksqlalchemy import db from guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/')", "style_cloud_json(): styles = [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName':", "url_for, flash, Blueprint, jsonify, current_app from sqlalchemy import func from guitarfan.models import *", "tag_id, 'tagName': tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles", "tags.append({'tagId': tag_id, 'tagName': 
tag_name, 'count': tab_count}) return jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json():", "guitarfan.extensions.flaskcache import cache bp_site_index = Blueprint('bp_site_index', __name__, template_folder=\"../../templates/site\") @bp_site_index.route('/') @bp_site_index.route('/index') def index(): hot_tabs", "styles = [] for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id): styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id),", "tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count':", "python # -*- coding: utf-8 -*- from flask import render_template, request, redirect, url_for,", "= Tab.query.order_by(Tab.update_time.desc()).limit(12) return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs) @bp_site_index.route('/tagcloud.json') @cache.cached(3600, key_prefix='tag_cloud_json') def tag_cloud_json(): tags =", "tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id): tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count})", "tab_count}) return jsonify(styles=styles) @bp_site_index.route('/robots.txt') def robots_txt(): return \"\"\"<html> <head></head> <body> <pre>User-agent: * Crawl-delay:", "jsonify(tags=tags) @bp_site_index.route('/stylecloud.json') @cache.cached(3600, key_prefix='style_cloud_json') def style_cloud_json(): styles = [] for style_id, tab_count in" ]
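# A minimal sketch of how a blueprint like bp_site_index could be mounted on a
# Flask application. The factory below is an assumption for illustration; the
# real project presumably also initialises its db and cache extensions here.
from flask import Flask


def create_app():
    app = Flask(__name__)
    app.register_blueprint(bp_site_index)
    return app


if __name__ == '__main__':
    create_app().run(debug=True)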
[ "kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self):", "import urljoin import binascii import logging import eyed3 from eyed3.id3 import ID3_V1 from", "chunk): pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args))", "{request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time()", "write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs): result = {'success':", "0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest()", "bytes]): if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string:", "'crc32': return crc32(string) elif hash_func == 'md5': return md5(string) raise ValueError('Unknown hash function:", "'>', '/', '?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string)", "'‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string)", "string.lower() if truncate is not None: string = string[:truncate] return string def set_id3_tag(path:", "'', string) string = unidecode(string) # transliteration and other staff: converts to ascii", "TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format(", "converts to ascii string = string.strip() string = re.sub(r'\\s+', ' ', string) if", "'{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str): string = string.encode()", "return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if", "import logging import eyed3 from eyed3.id3 import ID3_V1 from unidecode import unidecode from", "str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func ==", "eyed3 from eyed3.id3 import ID3_V1 from unidecode import unidecode from tornado import web", "'*', '(', ')', '_', '=', '+', '[', '{', ']', '}', '\\\\', '|', ';',", "re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) # transliteration and other staff: converts", "def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title = unidecode(audio_info['title']).strip() audio.tag.artist", "re from typing import Union, Optional, Dict from urllib.parse import urljoin import binascii", "unidecode(string) # transliteration and other staff: converts to ascii string = string.strip() string", "== 'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool", "function: {}'.format(hash_func)) def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate:", "bytes]): if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string):", "md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, 
to_lower: bool = True, alpha_numeric_only:", "'@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '=', '+', '[',", "remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk): pass", "isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} =>", "hash_func == 'crc32': return crc32(string) elif hash_func == 'md5': return md5(string) raise ValueError('Unknown", "== 'crc32': return crc32(string) elif hash_func == 'md5': return md5(string) raise ValueError('Unknown hash", "vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string, str): string", "crc32(string) elif hash_func == 'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def", "= False return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str,", "import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request from {}:", "'—', '–', ',', '<', '>', '/', '?', '‘', '’', '“', '”'] string =", "request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self):", "'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool =", "unidecode import unidecode from tornado import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler):", "basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def", "'}', '\\\\', '|', ';', ':', '\"', \"'\", '—', '–', ',', '<', '>', '/',", "'”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) # transliteration and", "'|', ';', ':', '\"', \"'\", '—', '–', ',', '<', '>', '/', '?', '‘',", "ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool =", "& 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return", "return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool = True,", "import binascii import logging import eyed3 from eyed3.id3 import ID3_V1 from unidecode import", "urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return", "string = unidecode(string) # transliteration and other staff: converts to ascii string =", "from tornado import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None", "return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string, str): string = string.encode()", "if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri}", "import ID3_V1 from unidecode import unidecode from tornado import web from settings import", "Dict from urllib.parse import urljoin import 
binascii import logging import eyed3 from eyed3.id3", "0, 'error': self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if", "= ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')',", "eyed3.id3 import ID3_V1 from unidecode import unidecode from tornado import web from settings", "':', '\"', \"'\", '—', '–', ',', '<', '>', '/', '?', '‘', '’', '“',", "')', '_', '=', '+', '[', '{', ']', '}', '\\\\', '|', ';', ':', '\"',", "= re.sub(r'\\s+', ' ', string) if to_lower: string = string.lower() if truncate is", "{method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 *", "urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler()", "from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request()", "= logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler)", "logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate", "string = string.strip() string = re.sub(r'\\s+', ' ', string) if to_lower: string =", "reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG):", "\"'\", '—', '–', ',', '<', '>', '/', '?', '‘', '’', '“', '”'] string", "'<', '>', '/', '?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '',", "{}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self,", "return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str): string =", "= re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`', '!', '@', '#', '$',", "request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk): pass def reverse_full_url(self,", "'–', ',', '<', '>', '/', '?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape,", "basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False", "'(', ')', '_', '=', '+', '[', '{', ']', '}', '\\\\', '|', ';', ':',", "result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code}", "= {'success': 0, 'error': self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception =", 
"logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path:", "self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(),", "'data': result}) def write_error(self, status_code, **kwargs): result = {'success': 0, 'error': self._reason, 'error_code':", "logging import eyed3 from eyed3.id3 import ID3_V1 from unidecode import unidecode from tornado", "host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name)", "elif hash_func == 'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string,", "= None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars = ['~',", "status_code, **kwargs): result = {'success': 0, 'error': self._reason, 'error_code': status_code} if 'exc_info' in", "= unidecode(string) # transliteration and other staff: converts to ascii string = string.strip()", "self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def", "body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result}) def", "settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request from", "['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_',", "self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result})", "if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO", "logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def", "'`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '=',", "def data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url,", "time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk): pass def reverse_full_url(self, name, *args):", "def write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs): result =", "def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(),", "'+', '[', '{', ']', '}', '\\\\', '|', ';', ':', '\"', \"'\", '—', '–',", "if to_lower: string = string.lower() if truncate is not None: string = string[:truncate]", "self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result):", "**kwargs): result = {'success': 0, 'error': self._reason, 'error_code': status_code} 
if 'exc_info' in kwargs:", "string def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title = unidecode(audio_info['title']).strip()", "'\"', \"'\", '—', '–', ',', '<', '>', '/', '?', '‘', '’', '“', '”']", "def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body:", "bad_chars)), '', string) string = unidecode(string) # transliteration and other staff: converts to", "and other staff: converts to ascii string = string.strip() string = re.sub(r'\\s+', '", "web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP:", ") def data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return", "False, truncate: Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else:", "from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request", "result = {'success': 0, 'error': self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception", "to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] = None): if", "typing import Union, Optional, Dict from urllib.parse import urljoin import binascii import logging", "sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] = None):", "to ascii string = string.strip() string = re.sub(r'\\s+', ' ', string) if to_lower:", "string = string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path)", "Union[str, bytes]): if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str,", "'/', '?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string", "'?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string =", "name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger", "Optional, Dict from urllib.parse import urljoin import binascii import logging import eyed3 from", "string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) # transliteration and other", "crc32(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF)", "=> HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() )", "*args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s", "str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string, str): string =", "def uni_hash(hash_func: str, string): if hash_func == 'crc32': return crc32(string) elif hash_func ==", "hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False,", "None: 
string = string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict): audio =", "class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(),", "hashlib import re from typing import Union, Optional, Dict from urllib.parse import urljoin", "= string.lower() if truncate is not None: string = string[:truncate] return string def", "logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL)", "return crc32(string) elif hash_func == 'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func))", "else: bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*',", "lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') )", "string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func == 'crc32': return crc32(string)", "audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title = unidecode(audio_info['title']).strip() audio.tag.artist = unidecode(audio_info['artist']).strip() audio.tag.save(version=ID3_V1)", "'!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '=', '+',", "'{', ']', '}', '\\\\', '|', ';', ':', '\"', \"'\", '—', '–', ',', '<',", "if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str,", "self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1,", "write_error(self, status_code, **kwargs): result = {'success': 0, 'error': self._reason, 'error_code': status_code} if 'exc_info'", "self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError):", "self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter(", "web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{}", "from eyed3.id3 import ID3_V1 from unidecode import unidecode from tornado import web from", "logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger", "%(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path: str):", "= string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func == 'crc32': return", "BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip,", "' ', string) if to_lower: string = string.lower() if truncate is not None:", "to_lower: string = string.lower() if truncate is not None: string = 
string[:truncate] return", "from typing import Union, Optional, Dict from urllib.parse import urljoin import binascii import", "def crc32(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) &", "self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip,", "import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self):", "bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] = None): if alpha_numeric_only:", "if hash_func == 'crc32': return crc32(string) elif hash_func == 'md5': return md5(string) raise", "alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`', '!', '@',", "{status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def", "string): if hash_func == 'crc32': return crc32(string) elif hash_func == 'md5': return md5(string)", "False return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]):", "raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool", "status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) #", "hash_func == 'md5': return md5(string) raise ValueError('Unknown hash function: {}'.format(hash_func)) def sanitize(string, to_lower:", "Union[str, bytes]): if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def", "= string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1)", "return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func == 'crc32': return crc32(string) elif", "truncate: Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars", "*args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger =", "set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title = unidecode(audio_info['title']).strip() audio.tag.artist =", "string) if to_lower: string = string.lower() if truncate is not None: string =", "transliteration and other staff: converts to ascii string = string.strip() string = re.sub(r'\\s+',", "is not None: string = string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict):", "= string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str):", "'%', '^', '&', '*', '(', ')', '_', '=', '+', '[', '{', ']', '}',", "def write_error(self, status_code, **kwargs): result = {'success': 0, 'error': self._reason, 'error_code': status_code} if", "other staff: converts to ascii string = string.strip() string = re.sub(r'\\s+', ' ',", "import re from typing import Union, Optional, Dict from urllib.parse import urljoin import", "ID3_V1 from unidecode import unidecode from tornado import web from settings import LOG_LEVEL", "= 
kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip}", "({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self,", "= string.strip() string = re.sub(r'\\s+', ' ', string) if to_lower: string = string.lower()", "hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func == 'crc32': return crc32(string) elif hash_func", "staff: converts to ascii string = string.strip() string = re.sub(r'\\s+', ' ', string)", "# transliteration and other staff: converts to ascii string = string.strip() string =", "string) string = unidecode(string) # transliteration and other staff: converts to ascii string", "def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name,", "tornado import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def", "string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string, str): string", "alpha_numeric_only: bool = False, truncate: Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+',", "'[', '{', ']', '}', '\\\\', '|', ';', ':', '\"', \"'\", '—', '–', ',',", "kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method}", "not None: string = string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict): audio", "= True, alpha_numeric_only: bool = False, truncate: Optional[int] = None): if alpha_numeric_only: string", "\"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler", "data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name,", "'=', '+', '[', '{', ']', '}', '\\\\', '|', ';', ':', '\"', \"'\", '—',", "self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success':", "* self.request.request_time() ) ) def data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url", "= logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return", "truncate is not None: string = string[:truncate] return string def set_id3_tag(path: str, audio_info:", "if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`', '!',", "'^', '&', '*', '(', ')', '_', '=', '+', '[', '{', ']', '}', '\\\\',", ") self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data':", "def setup_logger(name, lvl=logging.DEBUG): logger = 
logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s", "log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri,", "exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def log_request(self): self.logger.info(", "{}'.format(hash_func)) def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int]", "unidecode from tornado import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger =", "result}) def write_error(self, status_code, **kwargs): result = {'success': 0, 'error': self._reason, 'error_code': status_code}", "bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(',", "string) else: bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^', '&',", "']', '}', '\\\\', '|', ';', ':', '\"', \"'\", '—', '–', ',', '<', '>',", "{'success': 0, 'error': self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1]", "isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func", "ascii string = string.strip() string = re.sub(r'\\s+', ' ', string) if to_lower: string", "def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] =", "on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs):", "= \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl)", "= re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) # transliteration and other staff:", "# TODO self.finish(result) def log_request(self): self.logger.info( '{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f}", "Union, Optional, Dict from urllib.parse import urljoin import binascii import logging import eyed3", "pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request)) return urljoin(host_url, self.reverse_url(name, *args)) def", "isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]):", "'$', '%', '^', '&', '*', '(', ')', '_', '=', '+', '[', '{', ']',", "LOG_LEVEL class BasicHandler(web.RequestHandler): logger = None def prepare(self): self.logger.debug('{} request from {}: {}'.format(", "binascii import logging import eyed3 from eyed3.id3 import ID3_V1 from unidecode import unidecode", "import unidecode from tornado import web from settings import LOG_LEVEL class BasicHandler(web.RequestHandler): logger", "HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) )", "self.request.request_time() ) ) def data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url =", "re.sub(r'\\s+', ' ', string) if to_lower: string = string.lower() if 
truncate is not", "string = string.lower() if truncate is not None: string = string[:truncate] return string", "', string) if to_lower: string = string.lower() if truncate is not None: string", "logger = None def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri)", "str, string): if hash_func == 'crc32': return crc32(string) elif hash_func == 'md5': return", "bool = False, truncate: Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+', '',", "%(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path: str): return", "'&', '*', '(', ')', '_', '=', '+', '[', '{', ']', '}', '\\\\', '|',", "= False, truncate: Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string)", "'', string) else: bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^',", "result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs): result = {'success': 0,", "True, alpha_numeric_only: bool = False, truncate: Optional[int] = None): if alpha_numeric_only: string =", "= None def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) )", "prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode()))", "'#', '$', '%', '^', '&', '*', '(', ')', '_', '=', '+', '[', '{',", "'“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) # transliteration", ") ) def data_received(self, chunk): pass def reverse_full_url(self, name, *args): host_url = \"{protocol}://{host}\".format(**vars(self.request))", "string = re.sub(r'\\s+', ' ', string) if to_lower: string = string.lower() if truncate", "logger.propagate = False return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string:", "1, 'data': result}) def write_error(self, status_code, **kwargs): result = {'success': 0, 'error': self._reason,", "str): string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if", "Optional[int] = None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars =", "def md5(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def", "';', ':', '\"', \"'\", '—', '–', ',', '<', '>', '/', '?', '‘', '’',", "str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title = unidecode(audio_info['title']).strip() audio.tag.artist = unidecode(audio_info['artist']).strip()", "import hashlib import re from typing import Union, Optional, Dict from urllib.parse import", "'{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0", "uni_hash(hash_func: str, string): if hash_func == 'crc32': return crc32(string) elif hash_func == 'md5':", "string = re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`', '!', '@', '#',", "string[:truncate] return string def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) 
audio.tag.title", "urljoin import binascii import logging import eyed3 from eyed3.id3 import ID3_V1 from unidecode", "in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result) def", "status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk): pass def reverse_full_url(self, name,", "basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path: str): return urljoin('https://api.vk.com/', path)", "path) def crc32(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return '{:08x}'.format(binascii.crc32(string)", "string.strip() string = re.sub(r'\\s+', ' ', string) if to_lower: string = string.lower() if", "re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`', '!', '@', '#', '$', '%',", "from unidecode import unidecode from tornado import web from settings import LOG_LEVEL class", "self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs): result = {'success': 0, 'error':", "{}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request body: {}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def", "'’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string) string = unidecode(string) #", "',', '<', '>', '/', '?', '‘', '’', '“', '”'] string = re.sub(r'|'.join(map(re.escape, bad_chars)),", "import Union, Optional, Dict from urllib.parse import urljoin import binascii import logging import", "def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string, str):", "string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if hash_func == 'crc32':", "'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args) # TODO self.finish(result)", "{}'.format(self.request.body.decode())) def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self,", "md5(string: Union[str, bytes]): if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func:", "def on_finish(self): self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code,", ") basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate = False return logger def vk_url(path: str): return urljoin('https://api.vk.com/',", "logger def vk_url(path: str): return urljoin('https://api.vk.com/', path) def crc32(string: Union[str, bytes]): if isinstance(string,", "from urllib.parse import urljoin import binascii import logging import eyed3 from eyed3.id3 import", "urllib.parse import urljoin import binascii import logging import eyed3 from eyed3.id3 import ID3_V1", "'error': self._reason, 'error_code': status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception,", "string = string.encode() return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF) def md5(string: Union[str, bytes]): if isinstance(string,", "if isinstance(string, str): string = string.encode() return hashlib.md5(string).hexdigest() def uni_hash(hash_func: str, string): if", "import 
eyed3 from eyed3.id3 import ID3_V1 from unidecode import unidecode from tornado import", "'_', '=', '+', '[', '{', ']', '}', '\\\\', '|', ';', ':', '\"', \"'\",", "if truncate is not None: string = string[:truncate] return string def set_id3_tag(path: str,", "return string def set_id3_tag(path: str, audio_info: Dict): audio = eyed3.load(path) audio.initTag(version=ID3_V1) audio.tag.title =", "logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s') ) basic_stream_handler.setLevel(LOG_LEVEL) logger.addHandler(basic_stream_handler) logger.propagate =", "None def prepare(self): self.logger.debug('{} request from {}: {}'.format( self.request.method.capitalize(), self.request.remote_ip, self.request.uri) ) self.logger.debug('Request", "self.log_request() def write_result(self, result): self.finish({'success': 1, 'data': result}) def write_error(self, status_code, **kwargs): result", "'\\\\', '|', ';', ':', '\"', \"'\", '—', '–', ',', '<', '>', '/', '?',", "return urljoin(host_url, self.reverse_url(name, *args)) def setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler =", "'error_code': status_code} if 'exc_info' in kwargs: exception = kwargs['exc_info'][1] if isinstance(exception, web.HTTPError): result.update(exception.args)", "None): if alpha_numeric_only: string = re.sub(r'\\w+', '', string) else: bad_chars = ['~', '`',", "setup_logger(name, lvl=logging.DEBUG): logger = logging.getLogger(name) logger.setLevel(lvl) basic_stream_handler = logging.StreamHandler() basic_stream_handler.setFormatter( logging.Formatter('%(levelname)-8s %(asctime)s %(message)s')", "method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk): pass def", "ms)'.format( remote_ip=self.request.remote_ip, method=self.request.method.upper(), request_uri=self.request.uri, status_code=self.get_status(), time=1000.0 * self.request.request_time() ) ) def data_received(self, chunk):" ]
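The shingled fragments above come from a small VK-audio utility module: a tornado BasicHandler plus crc32 / md5 / uni_hash / sanitize / set_id3_tag helpers. A minimal, self-contained sketch of what the two hash helpers compute, using only the standard library — the function names simply mirror the fragments and are illustrative, not that module's API:

import binascii
import hashlib
from typing import Union


def crc32(data: Union[str, bytes]) -> str:
    # Zero-padded, lowercase hex CRC32 of the (UTF-8 encoded) input.
    if isinstance(data, str):
        data = data.encode()
    return '{:08x}'.format(binascii.crc32(data) & 0xFFFFFFFF)


def md5(data: Union[str, bytes]) -> str:
    # Hex digest of the MD5 hash of the (UTF-8 encoded) input.
    if isinstance(data, str):
        data = data.encode()
    return hashlib.md5(data).hexdigest()


print(crc32('hello'))  # 3610a686
print(md5('hello'))    # 5d41402abc4b2a76b9719d911017c592

The & 0xFFFFFFFF mask keeps the checksum in the unsigned 32-bit range on every Python version, so the eight-digit hex formatting stays stable.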
[ "with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\")", "grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\")", "plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\"", "grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over time, divided by seed", "log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" )", "from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as", "(hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed", "logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid =", "plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA performance for", "performance for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data", "by seed and with each seed in its own plot \"\"\" file =", "plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid = plot_performance(data,", "instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"])", "Sigmoid performance over time, divided by seed and with each seed in its", "over time, divided by seed and with each seed in its own plot", "drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show()", "# per instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show()", "logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA", "plot_performance import matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA performance for each", "matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA performance for each training instance", "instance seed (col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3", "= log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\")", "plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with 
plotting_context(\"poster\"):", "wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() #", "# overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed", "wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\")", "= load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data,", "from pathlib import Path from seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe", "title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with plotting_context(\"poster\"): grid", "Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with plotting_context(\"poster\"): grid =", "Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if __name__ == \"__main__\": per_instance_example() performance_example()", "performance_example(): \"\"\" Plot Sigmoid performance over time, divided by seed and with each", "load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance", "load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def per_instance_example():", "seed in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data", "seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance", "drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per", "grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col)", "= load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean", "for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data =", "in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data =", "Path from seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import", "log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def per_instance_example(): \"\"\"", "\"\"\" Plot CMA performance for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs", "dacbench.plotting import plot_performance_per_instance, plot_performance import 
matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA", "import plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import", "divided by seed and with each seed in its own plot \"\"\" file", "its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs,", "per instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() #", "per_instance_example(): \"\"\" Plot CMA performance for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\")", "each seed in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file)", "training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True,", "import Path from seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting", "title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if __name__ == \"__main__\": per_instance_example()", "import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def", "data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot", "log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show()", "Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\",", ") grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over time, divided by", "with each seed in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs =", "# per instance seed (col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\",", "title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid", "and with each seed in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs", "(col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92)", "per instance seed (col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\",", "own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True,", "= plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if __name__", "time, divided by seed and with 
each seed in its own plot \"\"\"", "plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot", "overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue)", "data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\")", "= Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall", "instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per", "CMA performance for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file)", "from seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance,", "hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with plotting_context(\"poster\"): grid = plot_performance(", "plt.show() # per instance seed (col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall", "plt def per_instance_example(): \"\"\" Plot CMA performance for each training instance \"\"\" file", "grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid", "title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid = plot_performance(data, title=\"Overall", "plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if __name__ ==", "grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def", "load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall", "= plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance seed (hue) grid =", "= log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\"", "each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs,", "\"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid", "as plt def per_instance_example(): \"\"\" Plot CMA performance for each training instance \"\"\"", "= Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance(", "plot \"\"\" file = 
Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"])", "Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) # overall grid", "plt.show() # per instance seed (hue) grid = plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\")", "import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA performance", "per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over time,", "= plot_performance(data, title=\"Overall Performance\", hue=\"seed\") grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with", "Plot Sigmoid performance over time, divided by seed and with each seed in", "<gh_stars>10-100 from pathlib import Path from seaborn import plotting_context from dacbench.logger import load_logs,", "Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over time, divided", "Plot CMA performance for each training instance \"\"\" file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs =", "Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data,", "plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show()", "seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance", "Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over", "= plot_performance_per_instance( data, title=\"CMA Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example():", "file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True) #", "data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if __name__ == \"__main__\":", "def performance_example(): \"\"\" Plot Sigmoid performance over time, divided by seed and with", "pathlib import Path from seaborn import plotting_context from dacbench.logger import load_logs, log2dataframe from", "\"\"\" Plot Sigmoid performance over time, divided by seed and with each seed", "grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 ) grid.fig.subplots_adjust(top=0.92) grid.savefig(\"output/sigmoid_overall_performance_per_seed.pdf\") plt.show() if", "\"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, 
drop_columns=[\"time\"]) Path(\"output\").mkdir(exist_ok=True)", "dacbench.logger import load_logs, log2dataframe from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt", "seed (col) with plotting_context(\"poster\"): grid = plot_performance( data, title=\"Overall Performance\", col=\"seed\", col_wrap=3 )", "plt.show() def performance_example(): \"\"\" Plot Sigmoid performance over time, divided by seed and", "Path(\"output\").mkdir(exist_ok=True) # overall grid = plot_performance(data, title=\"Overall Performance\") grid.savefig(\"output/sigmoid_overall_performance.pdf\") plt.show() # per instance", "seed and with each seed in its own plot \"\"\" file = Path(\"./data/sigmoid_example/PerformanceTrackingWrapper.jsonl\")", "Mean Performance per Instance\" ) grid.savefig(\"output/cma_performance_per_instance.pdf\") plt.show() def performance_example(): \"\"\" Plot Sigmoid performance", "performance over time, divided by seed and with each seed in its own", "def per_instance_example(): \"\"\" Plot CMA performance for each training instance \"\"\" file =", "grid.savefig(\"output/sigmoid_overall_performance_per_seed_hue.pdf\") plt.show() # per instance seed (col) with plotting_context(\"poster\"): grid = plot_performance( data,", "import matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot CMA performance for each training", "from dacbench.plotting import plot_performance_per_instance, plot_performance import matplotlib.pyplot as plt def per_instance_example(): \"\"\" Plot", "data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid = plot_performance_per_instance( data, title=\"CMA Mean Performance per", "file = Path(\"./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl\") logs = load_logs(file) data = log2dataframe(logs, wide=True, drop_columns=[\"time\"]) grid =" ]
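Because the wide dataframe built above carries the seed column that the hue/col arguments refer to, the same plot can be narrowed to a few seeds with plain pandas filtering before calling plot_performance. A small sketch, assuming the data variable from performance_example and whichever seed values actually occur in the logs:

# Restrict the overall plot to a subset of seeds (seed values here are hypothetical).
subset = data[data["seed"].isin([0, 1, 2])]
grid = plot_performance(subset, title="Overall Performance (selected seeds)", hue="seed")
grid.savefig("output/sigmoid_overall_performance_selected_seeds.pdf")
plt.show()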
[ "self.val = x # self.next = None # Definition for a binary tree", "# self.next = None # Definition for a binary tree node. # class", "ListNode :rtype: TreeNode \"\"\" def convert(head, n): if n == 0: return None", "return root n, ptr = 0, head while ptr: n += 1 ptr", "n - n // 2 - 1) return root n, ptr = 0,", "# self.val = x # self.next = None # Definition for a binary", "for a binary tree node. # class TreeNode(object): # def __init__(self, x): #", "x # self.left = None # self.right = None class Solution(object): def sortedListToBST(self,", "singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val = x", "= convert(head, n // 2) root.right = convert(mid.next, n - n // 2", "__init__(self, x): # self.val = x # self.left = None # self.right =", "\"\"\" def convert(head, n): if n == 0: return None mid = head", "convert(head, n): if n == 0: return None mid = head for i", "# def __init__(self, x): # self.val = x # self.next = None #", "- 1) return root n, ptr = 0, head while ptr: n +=", "1) return root n, ptr = 0, head while ptr: n += 1", "class ListNode(object): # def __init__(self, x): # self.val = x # self.next =", "head: ListNode :rtype: TreeNode \"\"\" def convert(head, n): if n == 0: return", "mid = mid.next root = TreeNode(mid.val) root.left = convert(head, n // 2) root.right", "convert(head, n // 2) root.right = convert(mid.next, n - n // 2 -", "if n == 0: return None mid = head for i in range(n", "n): if n == 0: return None mid = head for i in", "root.right = convert(mid.next, n - n // 2 - 1) return root n,", "head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def convert(head, n): if n", "def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def convert(head, n):", "None mid = head for i in range(n // 2): mid = mid.next", "ptr = 0, head while ptr: n += 1 ptr = ptr.next return", ":rtype: TreeNode \"\"\" def convert(head, n): if n == 0: return None mid", "for i in range(n // 2): mid = mid.next root = TreeNode(mid.val) root.left", "in range(n // 2): mid = mid.next root = TreeNode(mid.val) root.left = convert(head,", "Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def convert(head,", "self.val = x # self.left = None # self.right = None class Solution(object):", "class Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def", "// 2): mid = mid.next root = TreeNode(mid.val) root.left = convert(head, n //", "x # self.next = None # Definition for a binary tree node. #", "Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x):", "= None # self.right = None class Solution(object): def sortedListToBST(self, head): \"\"\" :type", "list. # class ListNode(object): # def __init__(self, x): # self.val = x #", "= None # Definition for a binary tree node. # class TreeNode(object): #", "n, ptr = 0, head while ptr: n += 1 ptr = ptr.next", "= head for i in range(n // 2): mid = mid.next root =", "Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): # self.val", "TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None", "node. # class TreeNode(object): # def __init__(self, x): # self.val = x #", "n // 2) root.right = convert(mid.next, n - n // 2 - 1)", "# Definition for singly-linked list. # class ListNode(object): # def __init__(self, x): #", "== 0: return None mid = head for i in range(n // 2):", "for singly-linked list. 
# class ListNode(object): # def __init__(self, x): # self.val =", "convert(mid.next, n - n // 2 - 1) return root n, ptr =", "head for i in range(n // 2): mid = mid.next root = TreeNode(mid.val)", "def convert(head, n): if n == 0: return None mid = head for", "a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val", "root.left = convert(head, n // 2) root.right = convert(mid.next, n - n //", "# self.val = x # self.left = None # self.right = None class", "= TreeNode(mid.val) root.left = convert(head, n // 2) root.right = convert(mid.next, n -", "mid.next root = TreeNode(mid.val) root.left = convert(head, n // 2) root.right = convert(mid.next,", "= 0, head while ptr: n += 1 ptr = ptr.next return convert(head,", "# def __init__(self, x): # self.val = x # self.left = None #", "n == 0: return None mid = head for i in range(n //", "TreeNode(mid.val) root.left = convert(head, n // 2) root.right = convert(mid.next, n - n", "__init__(self, x): # self.val = x # self.next = None # Definition for", "# self.left = None # self.right = None class Solution(object): def sortedListToBST(self, head):", "# Definition for a binary tree node. # class TreeNode(object): # def __init__(self,", "# self.right = None class Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode", "range(n // 2): mid = mid.next root = TreeNode(mid.val) root.left = convert(head, n", "None # self.right = None class Solution(object): def sortedListToBST(self, head): \"\"\" :type head:", "self.next = None # Definition for a binary tree node. # class TreeNode(object):", "- n // 2 - 1) return root n, ptr = 0, head", "0, head while ptr: n += 1 ptr = ptr.next return convert(head, n)", "TreeNode \"\"\" def convert(head, n): if n == 0: return None mid =", "// 2) root.right = convert(mid.next, n - n // 2 - 1) return", "= mid.next root = TreeNode(mid.val) root.left = convert(head, n // 2) root.right =", "// 2 - 1) return root n, ptr = 0, head while ptr:", "mid = head for i in range(n // 2): mid = mid.next root", "= None class Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode", "= x # self.left = None # self.right = None class Solution(object): def", "\"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def convert(head, n): if n ==", "root n, ptr = 0, head while ptr: n += 1 ptr =", "return None mid = head for i in range(n // 2): mid =", "x): # self.val = x # self.left = None # self.right = None", "binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val =", "i in range(n // 2): mid = mid.next root = TreeNode(mid.val) root.left =", "tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x", "2) root.right = convert(mid.next, n - n // 2 - 1) return root", ":type head: ListNode :rtype: TreeNode \"\"\" def convert(head, n): if n == 0:", "def __init__(self, x): # self.val = x # self.next = None # Definition", "n // 2 - 1) return root n, ptr = 0, head while", "# class ListNode(object): # def __init__(self, x): # self.val = x # self.next", "ListNode(object): # def __init__(self, x): # self.val = x # self.next = None", "None class Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\"", "= convert(mid.next, n - n // 2 - 1) return root n, ptr", "= x # self.next = None # Definition for a binary tree node.", "None # Definition for a binary tree node. 
# class TreeNode(object): # def", "0: return None mid = head for i in range(n // 2): mid", "self.right = None class Solution(object): def sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype:", "root = TreeNode(mid.val) root.left = convert(head, n // 2) root.right = convert(mid.next, n", "x): # self.val = x # self.next = None # Definition for a", "sortedListToBST(self, head): \"\"\" :type head: ListNode :rtype: TreeNode \"\"\" def convert(head, n): if", "def __init__(self, x): # self.val = x # self.left = None # self.right", "self.left = None # self.right = None class Solution(object): def sortedListToBST(self, head): \"\"\"", "2): mid = mid.next root = TreeNode(mid.val) root.left = convert(head, n // 2)", "class TreeNode(object): # def __init__(self, x): # self.val = x # self.left =", "2 - 1) return root n, ptr = 0, head while ptr: n", "# class TreeNode(object): # def __init__(self, x): # self.val = x # self.left" ]
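The helper builds the tree top-down: convert(head, n) walks to the middle of the first n nodes, makes it the root, and recurses on the two halves, so the result is height-balanced. A quick way to exercise it is to assemble a sorted list by hand and check that an inorder walk of the returned tree gives the values back in order; a minimal sketch with throwaway ListNode/TreeNode classes matching the commented definitions above:

class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None


class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


def inorder(node):
    # An inorder walk of a BST yields its keys in sorted order.
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []


# Build the sorted list -10 -> -3 -> 0 -> 5 -> 9 by prepending in reverse.
head = None
for v in reversed([-10, -3, 0, 5, 9]):
    node = ListNode(v)
    node.next = head
    head = node

print(inorder(Solution().sortedListToBST(head)))  # [-10, -3, 0, 5, 9]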
[ "api_out = False) elif query_type == 'affil_papers': obj_dicts = query_affil_papers(query = query_text, from_year", "def before_request(): if current_user.is_authenticated: current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST']) @bp.route('/index', methods=['GET',", "app = create_app() app.app_context().push() if query_type == 'author_papers': obj_dicts = query_author_papers(query = query_text,", "result_all=obj_dicts ) db.session.add(result) db.session.commit() return result.id @bp.route('/query/<query_type>', methods=['GET', 'POST']) @login_required def make_a_query(query_type): \"\"\"", "}, 'general_notes' : '<NAME>'} @bp.route('/api/query/author_papers/', methods = ['GET']) def query_author_papers(query = \"\", from_year", "ResetPasswordForm, authorIndexQueryForm from app.models import User, Result from app.email import send_password_reset_email from app.main", "\"\"\" job = Job.fetch(job_key, connection=current_app.redis) ### Return results if job.is_finished and job.result: result", "= EditProfileForm(current_user.username) if form.validate_on_submit(): current_user.username = form.username.data current_user.about_me = form.about_me.data db.session.commit() flash('Your changes", "query_text = query_text, query_from = from_year, query_affiliations = affils, query_locations= locations, user_querying =", "elif query_type == 'affil_papers': form = authorIndexQueryForm() if form.validate_on_submit(): if current_app.config['ASYNC_FUNC']: from app.main.routes", "= n_results, unique_results = result.length_of_results), 200 elif result.query_type == 'author_papers': return render_template('query_results/author_papers.html', \\", "<job_key>. If job is still running, this will redirect to the same page", "form=form) def run_query(query_type, query_text, \\ from_year, locations, affils, api_key, \\ querying_user): \"\"\" Query", "= request.args.get('api_out')\"\"\" if locations: locations = [location.strip().lower() for location in locations.split(',')] if affils:", "query_text, query_from = from_year, query_affiliations = affils, query_locations= locations, user_querying = querying_user, length_of_results", "author_dict in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data = author_dicts,", "key to run your query under!'} if api_out == True: return jsonify(no_key_dict) else:", "pages for different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data =", "if query_type == 'affil_papers': affil_dicts = query_affil_papers(query = form.query_text.data, from_year = form.query_from.data, locations", "\"\", api_out = True): timeit_start = time.time() \"\"\"if request.args.get('query'): query = request.args.get('query') if", "request.args.get('affiliations', []): affils = request.args.get('affiliations', []) if request.args.get('api_key'): api_key = request.args.get('api_key') if request.args.get('api_out'):", "app.app_context().push() if query_type == 'author_papers': obj_dicts = query_author_papers(query = query_text, from_year = from_year,", "'affil_papers': obj_dicts = query_affil_papers(query = query_text, from_year = from_year, locations = locations, n_authors", "GeoText import time @bp.before_request def before_request(): if current_user.is_authenticated: current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/',", "query_affil_papers(query = 
query_text, from_year = from_year, locations = locations, n_authors = 25, affils", "'', 'n' : ''}, 'info' : ''}, '/api/query/author_papers/' : {'parameters' : {'query' :", "= len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data = author_dicts, n_results = n_results, unique_results =", "'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results),", "under!'} if api_out == True: return jsonify(no_key_dict) else: return no_key_dict out_dict = query_author_papers_data(query,", "= result.query_text, query_from = result.query_from , query_location = result.query_locations, query_affiliations = result.query_affiliations) n_results", "return render_template('errors/data_error.html', data = result.result_all.get('error'), query_text = result.query_text, query_from = result.query_from , query_location", "api_key = form.api_key.data, api_out = False) n_results = sum([author_dict.get('total_count', 0) for author_dict in", "Result app = create_app() app.app_context().push() if query_type == 'author_papers': obj_dicts = query_author_papers(query =", "if request.args.get('locations'): locations = request.args.get('locations', []) if request.args.get('n', 25): n_authors = request.args.get('n', 25)", "@bp.route(\"/results/<job_key>\", methods=['GET']) def get_results(job_key): \"\"\" Results page for <job_key>. If job is still", "affils = request.args.get('affiliations', []) if request.args.get('api_key'): api_key = request.args.get('api_key') if request.args.get('api_out'): api_out =", "import Job from datetime import datetime, timezone from flask import render_template, flash, redirect,", "Config from werkzeug.urls import url_parse import itertools import re import ast import datetime", "job = Job.fetch(job_key, connection=current_app.redis) ### Return results if job.is_finished and job.result: result =", "locations = [location.strip().lower() for location in locations.split(',')] if affils: affils = [affil.strip().lower() for", "query_location = result.query_locations, query_affiliations = result.query_affiliations) n_results = sum([author_dict.get('total_count', 0) for author_dict in", "== False if query_type == 'affil_papers': affil_dicts = query_affil_papers(query = form.query_text.data, from_year =", "\\ result.result_all.values()]) ### Return different pages for different queries if result.query_type == 'affil_papers':", "['GET']) def help(): return {'endpoints' : {'/api/query/author_affils/' : {'parameters' : {'query' : '',", "if request.args.get('api_out'): api_out = request.args.get('api_out')\"\"\" if locations: locations = [location.strip().lower() for location in", "== 'author_papers': return render_template('query_results/author_papers.html', \\ data = result.result_all, n_results = n_results, unique_results =", "Query data is returned in a nested dictionary and assigned to `obj_dicts` which", "@login_required def user(username): user = User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user) @bp.route('/edit_profile', methods=['GET', 'POST']) @login_required", "### Run the query without task queue if async == False if query_type", "n_authors = \"\", affils = \"\", api_key = \"\", api_out = True): timeit_start", "func=run_query, args=(query_type, form.query_text.data, form.query_from.data, form.locations.data, form.affiliations.data, form.api_key.data, current_user.username), 
result_ttl=current_app.config['RESULT_TTL'], timeout=current_app.config['WORKER_TIMEOUT']) flash(f'Your query is", "render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200 elif", "query_affil_papers(query = \"\", from_year = \"\", locations = \"\", n_authors = \"\", affils", "\"\"\" Results page for <job_key>. If job is still running, this will redirect", "### Return results if job.is_finished and job.result: result = Result.query.filter_by(id=job.result).first() if result.result_all.get('error'): return", "= \"\", affils = \"\", api_key = \"\", api_out = True): timeit_start =", "length_of_results), 200 return render_template('make_a_query.html', form=form) @bp.route(\"/results/<job_key>\", methods=['GET']) def get_results(job_key): \"\"\" Results page for", "= User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user) @bp.route('/edit_profile', methods=['GET', 'POST']) @login_required def edit_profile(): form =", "= len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data = affil_dicts, n_results = n_results, unique_results =", "with the link to refresh again. When its done, the refresh link will", "{job.get_id()}') return get_results(job.get_id()) elif not current_app.config['ASYNC_FUNC']: ### Run the query without task queue", "\"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.') if", "== True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/', methods = ['GET']) def query_affil_papers(query", ": '<NAME>'} @bp.route('/api/query/author_papers/', methods = ['GET']) def query_author_papers(query = \"\", from_year = \"\",", "import datetime, timezone from flask import render_template, flash, redirect, url_for, request, jsonify,current_app from", "return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user = User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user) @bp.route('/edit_profile',", "= form.about_me.data db.session.commit() flash('Your changes have been saved.') return redirect(url_for('main.edit_profile')) elif request.method ==", "this will redirect to the same page with the link to refresh again.", "query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end = time.time() #print(f'`author_papers_w_location` for \"{query}\"", "'general_notes' : '<NAME>'} @bp.route('/api/query/author_papers/', methods = ['GET']) def query_author_papers(query = \"\", from_year =", "len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data = affil_dicts, n_results = n_results, unique_results = length_of_results),", "get_results(job_key): \"\"\" Results page for <job_key>. 
If job is still running, this will", "werkzeug.urls import url_parse import itertools import re import ast import datetime import pandas", "query_affiliations = affils, query_locations= locations, user_querying = querying_user, length_of_results = len(obj_dicts.keys()), result_all=obj_dicts )", "querying_user, length_of_results = len(obj_dicts.keys()), result_all=obj_dicts ) db.session.add(result) db.session.commit() return result.id @bp.route('/query/<query_type>', methods=['GET', 'POST'])", "affils = request.args.get('affiliations', []) #if request.args.get('api_key'): # api_key = request.args.get('api_key') if locations: locations", "again. When its done, the refresh link will link to the tables. \"\"\"", "request.method == 'GET': form.username.data = current_user.username form.about_me.data = current_user.about_me return render_template('edit_profile.html', title='Edit Profile',", "will link to the tables. \"\"\" job = Job.fetch(job_key, connection=current_app.redis) ### Return results", "== 'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results, unique_results =", "\"\", api_key = \"\", api_out = True): timeit_start = time.time() #if request.args.get('query'): #", "app.main_api_functions import * from rq.job import Job from datetime import datetime, timezone from", "current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST']) @bp.route('/index', methods=['GET', 'POST']) def index(): return", "task queue if async == False if query_type == 'affil_papers': affil_dicts = query_affil_papers(query", "time @bp.before_request def before_request(): if current_user.is_authenticated: current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST'])", "assigned to `obj_dicts` which is stored in the db. \"\"\" ### Import create_app", "is still processing else: return render_template('query_results/processing.html', job_key = job_key), 202 ####### @bp.route('/api/help/', methods", "if api_out == True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/', methods = ['GET'])", "= query_affil_papers(query = query_text, from_year = from_year, locations = locations, n_authors = 25,", "affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data = affil_dicts, n_results = n_results,", "api_key = \"\", api_out = True): timeit_start = time.time() #if request.args.get('query'): # query", "''}, 'info' : ''}, '/api/query/author_papers/' : {'parameters' : {'query' : '', 'from' :", "= request.args.get('affiliations', []) if request.args.get('api_key'): api_key = request.args.get('api_key') if request.args.get('api_out'): api_out = request.args.get('api_out')\"\"\"", "= length_of_results), 200 elif query_type == 'author_papers': author_dicts = query_author_papers(query = form.query_text.data, from_year", "page for <job_key>. If job is still running, this will redirect to the", "result_ttl=current_app.config['RESULT_TTL'], timeout=current_app.config['WORKER_TIMEOUT']) flash(f'Your query is running! 
Your ID is : {job.get_id()}') return get_results(job.get_id())", "= ['GET']) def query_affil_papers(query = \"\", from_year = \"\", locations = \"\", n_authors", "is returned in a nested dictionary and assigned to `obj_dicts` which is stored", "= sum([author_dict.get('total_count', 0) for author_dict in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html',", "sum([affil_dict['total_count'] for affil_dict in \\ affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data", "data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200 ### Refresh if", "query = request.args.get('query') #if request.args.get('from'): # from_year = int(request.args.get('from', 2000)) #if request.args.get('locations'): #", "api_key = \"\", api_out = True): timeit_start = time.time() \"\"\"if request.args.get('query'): query =", "= form.username.data current_user.about_me = form.about_me.data db.session.commit() flash('Your changes have been saved.') return redirect(url_for('main.edit_profile'))", "False) result = Result( query_type = query_type, query_text = query_text, query_from = from_year,", "import LoginForm, RegistrationForm, EditProfileForm, \\ ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models import User, Result", "def query_affil_papers(query = \"\", from_year = \"\", locations = \"\", n_authors = \"\",", "'author_papers': return render_template('query_results/author_papers.html', \\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results),", "methods=['GET', 'POST']) @bp.route('/index', methods=['GET', 'POST']) def index(): return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username):", "time.time() print(f'`query_author_papers` for \"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds.", "results.') print(f'`query_affil_papers` for \"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds.", "return out_dict @bp.route('/api/query/affil_papers/', methods = ['GET']) def query_affil_papers(query = \"\", from_year = \"\",", "# affils = request.args.get('affiliations', []) #if request.args.get('api_key'): # api_key = request.args.get('api_key') if locations:", "render_template('errors/data_error.html', data = result.result_all.get('error'), query_text = result.query_text, query_from = result.query_from , query_location =", "import Config from app.main_api_functions import * from rq.job import Job from datetime import", "ID is : {job.get_id()}') return get_results(job.get_id()) elif not current_app.config['ASYNC_FUNC']: ### Run the query", "0) for author_dict in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data", "query without task queue if async == False if query_type == 'affil_papers': affil_dicts", "methods=['GET', 'POST']) @login_required def edit_profile(): form = EditProfileForm(current_user.username) if form.validate_on_submit(): current_user.username = form.username.data", "from config import Config from app.main_api_functions import * from rq.job import Job from", "query_type == 'affil_papers': obj_dicts = query_affil_papers(query = query_text, from_year = from_year, locations =", "return render_template('query_results/author_papers.html', \\ data = 
# app/main/routes.py (repo: mrtoronto/find-a-lab)
from app import db
from app.main.forms import LoginForm, RegistrationForm, EditProfileForm, \
    ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm
from app.models import User, Result
from app.email import send_password_reset_email
from app.main import bp
from config import Config
from app.main_api_functions import *
from rq.job import Job
from datetime import datetime, timezone
from flask import render_template, flash, redirect, url_for, request, jsonify, current_app
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.urls import url_parse
import itertools
import re
import ast
import datetime
import pandas as pd
from collections import Counter
from geotext import GeoText
import time


@bp.before_request
def before_request():
    if current_user.is_authenticated:
        current_user.last_seen = datetime.datetime.now(timezone.utc)
        db.session.commit()


@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
def index():
    return render_template('index.html')


@bp.route('/user/<username>')
@login_required
def user(username):
    user = User.query.filter_by(username=username).first_or_404()
    return render_template('user.html', user=user)


@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash('Your changes have been saved.')
        return redirect(url_for('main.edit_profile'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title='Edit Profile', form=form)


def run_query(query_type, query_text, \
        from_year, locations, affils, api_key, \
        querying_user):
    """
    Query data is returned in a nested dictionary and assigned to `obj_dicts`,
    which is stored in the db.
    """
    ### Import create_app because this function is run by the worker
    from app import create_app
    from app.models import Result
    app = create_app()
    app.app_context().push()
    if query_type == 'author_papers':
        obj_dicts = query_author_papers(query = query_text,
                        from_year = from_year,
                        locations = locations,
                        n_authors = 25,
                        affils = affils,
                        api_key = api_key,
                        api_out = False)
    elif query_type == 'affil_papers':
        obj_dicts = query_affil_papers(query = query_text,
                        from_year = from_year,
                        locations = locations,
                        n_authors = 25,
                        affils = affils,
                        api_key = api_key,
                        api_out = False)
    result = Result(query_type = query_type,
                    query_text = query_text,
                    query_from = from_year,
                    query_affiliations = affils,
                    query_locations = locations,
                    user_querying = querying_user,
                    length_of_results = len(obj_dicts.keys()),
                    result_all = obj_dicts)
    db.session.add(result)
    db.session.commit()
    return result.id


@bp.route('/query/<query_type>', methods=['GET', 'POST'])
@login_required
def make_a_query(query_type):
    """
    Render the query form for <query_type> and run the query, either on the
    task queue or synchronously, depending on the ASYNC_FUNC config flag.
    """
    if query_type == 'author_papers':
        form = authorIndexQueryForm()
    elif query_type == 'affil_papers':
        form = authorIndexQueryForm()
    if form.validate_on_submit():
        if current_app.config['ASYNC_FUNC']:
            from app.main.routes import run_query
            ### If async == True, queue a task with the args from the form
            job = current_app.task_queue.enqueue_call(
                    func=run_query,
                    args=(query_type,
                          form.query_text.data,
                          form.query_from.data,
                          form.locations.data,
                          form.affiliations.data,
                          form.api_key.data,
                          current_user.username),
                    result_ttl=current_app.config['RESULT_TTL'],
                    timeout=current_app.config['WORKER_TIMEOUT'])
            flash(f'Your query is running! Your ID is : {job.get_id()}')
            return get_results(job.get_id())
        elif not current_app.config['ASYNC_FUNC']:
            ### Run the query without task queue if async == False
            if query_type == 'affil_papers':
                affil_dicts = query_affil_papers(query = form.query_text.data,
                                from_year = form.query_from.data,
                                locations = form.locations.data,
                                n_authors = 25,
                                affils = form.affiliations.data,
                                api_key = form.api_key.data,
                                api_out = False)
                n_results = sum([affil_dict['total_count'] for affil_dict in \
                                    affil_dicts.values()])
                length_of_results = len(affil_dicts.keys())
                return render_template('query_results/affil_papers.html', \
                            data = affil_dicts,
                            n_results = n_results,
                            unique_results = length_of_results), 200
            elif query_type == 'author_papers':
                author_dicts = query_author_papers(query = form.query_text.data,
                                from_year = form.query_from.data,
                                locations = form.locations.data,
                                n_authors = 25,
                                affils = form.affiliations.data,
                                api_key = form.api_key.data,
                                api_out = False)
                n_results = sum([author_dict.get('total_count', 0) for author_dict in \
                                    author_dicts.values()])
                length_of_results = len(author_dicts.keys())
                return render_template('query_results/author_papers.html', \
                            data = author_dicts,
                            n_results = n_results,
                            unique_results = length_of_results), 200
    return render_template('make_a_query.html', form=form)


@bp.route("/results/<job_key>", methods=['GET'])
def get_results(job_key):
    """
    Results page for <job_key>. If the job is still running, this will redirect
    to the same page with a link to refresh again. When it's done, the refresh
    link will link to the tables.
    """
    job = Job.fetch(job_key, connection=current_app.redis)
    ### Return results
    if job.is_finished and job.result:
        result = Result.query.filter_by(id=job.result).first()
        if result.result_all.get('error'):
            return render_template('errors/data_error.html',
                        data = result.result_all.get('error'),
                        query_text = result.query_text,
                        query_from = result.query_from,
                        query_location = result.query_locations,
                        query_affiliations = result.query_affiliations)
        n_results = sum([author_dict.get('total_count', 0) for author_dict in \
                            result.result_all.values()])
        ### Return different pages for different queries
        if result.query_type == 'affil_papers':
            return render_template('query_results/affil_papers.html', \
                        data = result.result_all,
                        n_results = n_results,
                        unique_results = result.length_of_results), 200
        elif result.query_type == 'author_papers':
            return render_template('query_results/author_papers.html', \
                        data = result.result_all,
                        n_results = n_results,
                        unique_results = result.length_of_results), 200
    ### Refresh if job is still processing
    else:
        return render_template('query_results/processing.html', job_key = job_key), 202


#######

@bp.route('/api/help/', methods = ['GET'])
def help():
    return {'endpoints' :
                {'/api/query/author_affils/' :
                    {'parameters' : {'query' : '', 'from' : '', 'locations' : '', 'n' : ''},
                     'info' : ''},
                 '/api/query/author_papers/' :
                    {'parameters' : {'query' : '', 'from' : '', 'locations' : '', 'n' : ''},
                     'info' : ''}},
            'general_notes' : '<NAME>'}


@bp.route('/api/query/author_papers/', methods = ['GET'])
def query_author_papers(query = "", from_year = "", locations = "", n_authors = "",
                        affils = "", api_key = "", api_out = True):
    timeit_start = time.time()
    """if request.args.get('query'):
        query = request.args.get('query')
    if request.args.get('from'):
        from_year = int(request.args.get('from', 2000))
    if request.args.get('locations'):
        locations = request.args.get('locations', [])
    if request.args.get('n', 25):
        n_authors = request.args.get('n', 25)
    if request.args.get('affiliations', []):
        affils = request.args.get('affiliations', [])
    if request.args.get('api_key'):
        api_key = request.args.get('api_key')
    if request.args.get('api_out'):
        api_out = request.args.get('api_out')"""
    if locations:
        locations = [location.strip().lower() for location in locations.split(',')]
    if affils:
        affils = [affil.strip().lower() for affil in affils.split(',')]
    if not api_key:
        no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
        if api_out == True:
            return jsonify(no_key_dict)
        else:
            return no_key_dict
    out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
    timeit_end = time.time()
    print(f'`query_author_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
    if api_out == True:
        return jsonify(out_dict)
    else:
        return out_dict


@bp.route('/api/query/affil_papers/', methods = ['GET'])
def query_affil_papers(query = "", from_year = "", locations = "", n_authors = "",
                       affils = "", api_key = "", api_out = True):
    timeit_start = time.time()
    #if request.args.get('query'):
    #    query = request.args.get('query')
    #if request.args.get('from'):
    #    from_year = int(request.args.get('from', 2000))
    #if request.args.get('locations'):
    #    locations = request.args.get('locations', [])
    ##if request.args.get('n', 25):
    #    n_authors = request.args.get('n', 25)
    #if request.args.get('affiliations', []):
    #    affils = request.args.get('affiliations', [])
    #if request.args.get('api_key'):
    #    api_key = request.args.get('api_key')
    if locations:
        locations = [location.strip().lower() for location in locations.split(',')]
    if affils:
        affils = [affil.strip().lower() for affil in affils.split(',')]
    if not api_key:
        no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
        if api_out == True:
            return jsonify(no_key_dict)
        else:
            return no_key_dict
    out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
    timeit_end = time.time()
    #print(f'`author_papers_w_location` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
    print(f'`query_affil_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
    if api_out == True:
        return jsonify(out_dict)
    else:
        return out_dict
\"\"\" ### Import", "'POST']) @login_required def make_a_query(query_type): \"\"\" \"\"\" if query_type == 'author_papers': form = authorIndexQueryForm()", "n_authors = 25, affils = form.affiliations.data, api_key = form.api_key.data, api_out = False) n_results", "{'parameters' : {'query' : '', 'from' : '', 'locations' : '', 'n' :", "import send_password_reset_email from app.main import bp from config import Config from app.main_api_functions import", "If job is still running, this will redirect to the same page with", "= request.args.get('locations', []) if request.args.get('n', 25): n_authors = request.args.get('n', 25) if request.args.get('affiliations', []):", "= form.api_key.data, api_out = False) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\", "n_authors, timeit_start, api_key) timeit_end = time.time() #print(f'`author_papers_w_location` for \"{query}\" from {from_year} onward ran", "n_results, unique_results = result.length_of_results), 200 ### Refresh if job is still processing else:", "rq.job import Job from datetime import datetime, timezone from flask import render_template, flash,", "from flask_login import login_user, logout_user, current_user, login_required from config import Config from werkzeug.urls", "'', 'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''}", "index(): return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user = User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user)", "= authorIndexQueryForm() if form.validate_on_submit(): if current_app.config['ASYNC_FUNC']: from app.main.routes import run_query ### If async", "\\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200 ### Refresh", "because this function is run by the worker from app import create_app from", "'locations' : '', 'n' : ''}, 'info' : ''}, '/api/query/author_papers/' : {'parameters' :", "['GET']) def query_affil_papers(query = \"\", from_year = \"\", locations = \"\", n_authors =", "Returning results.') print(f'`query_affil_papers` for \"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)}", "seconds. Returning results.') if api_out == True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/',", "async == True, queue a task with the args from the form job", "page with the link to refresh again. 
When its done, the refresh link", "queue if async == False if query_type == 'affil_papers': affil_dicts = query_affil_papers(query =", "with the args from the form job = current_app.task_queue.enqueue_call( func=run_query, args=(query_type, form.query_text.data, form.query_from.data,", "'POST']) def index(): return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user = User.query.filter_by(username=username).first_or_404() return", "### Return different pages for different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html',", "url_parse import itertools import re import ast import datetime import pandas as pd", "= query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end = time.time() print(f'`query_author_papers` for", "methods=['GET', 'POST']) def index(): return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user = User.query.filter_by(username=username).first_or_404()", "= form.query_from.data, locations = form.locations.data, n_authors = 25, affils = form.affiliations.data, api_key =", "False) n_results = sum([affil_dict['total_count'] for affil_dict in \\ affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return", "length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data = affil_dicts, n_results = n_results, unique_results", "= author_dicts, n_results = n_results, unique_results = length_of_results), 200 return render_template('make_a_query.html', form=form) @bp.route(\"/results/<job_key>\",", "affils, api_key, \\ querying_user): \"\"\" Query data is returned in a nested dictionary", "n_authors = 25, affils = affils, api_key = api_key, api_out = False) elif", "\"\", api_out = True): timeit_start = time.time() #if request.args.get('query'): # query = request.args.get('query')", "affil_dicts, n_results = n_results, unique_results = length_of_results), 200 elif query_type == 'author_papers': author_dicts", "\"\", n_authors = \"\", affils = \"\", api_key = \"\", api_out = True):", "affils: affils = [affil.strip().lower() for affil in affils.split(',')] if not api_key: no_key_dict =", "a task with the args from the form job = current_app.task_queue.enqueue_call( func=run_query, args=(query_type,", "api_key = api_key, api_out = False) elif query_type == 'affil_papers': obj_dicts = query_affil_papers(query", "locations = \"\", n_authors = \"\", affils = \"\", api_key = \"\", api_out", ": {'/api/query/author_affils/' : {'parameters' : {'query' : '', 'from' : '', 'locations' :", "'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''} },", "for author_dict in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data =", "EditProfileForm, \\ ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models import User, Result from app.email import", "the tables. 
\"\"\" job = Job.fetch(job_key, connection=current_app.redis) ### Return results if job.is_finished and", "for author_dict in \\ result.result_all.values()]) ### Return different pages for different queries if", "= ['GET']) def help(): return {'endpoints' : {'/api/query/author_affils/' : {'parameters' : {'query' :", "for affil in affils.split(',')] if not api_key: no_key_dict = {'error' : 'Please supply", "n_authors = request.args.get('n', 25) if request.args.get('affiliations', []): affils = request.args.get('affiliations', []) if request.args.get('api_key'):", "from_year, locations, affils, api_key, \\ querying_user): \"\"\" Query data is returned in a", "elif query_type == 'affil_papers': obj_dicts = query_affil_papers(query = query_text, from_year = from_year, locations", "from app.main.forms import LoginForm, RegistrationForm, EditProfileForm, \\ ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models import", "connection=current_app.redis) ### Return results if job.is_finished and job.result: result = Result.query.filter_by(id=job.result).first() if result.result_all.get('error'):", "{from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.') if api_out ==", "= current_user.about_me return render_template('edit_profile.html', title='Edit Profile', form=form) def run_query(query_type, query_text, \\ from_year, locations,", "n_authors = request.args.get('n', 25) #if request.args.get('affiliations', []): # affils = request.args.get('affiliations', []) #if", "from app.models import User, Result from app.email import send_password_reset_email from app.main import bp", "= request.args.get('n', 25) if request.args.get('affiliations', []): affils = request.args.get('affiliations', []) if request.args.get('api_key'): api_key", "render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user = User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user) @bp.route('/edit_profile', methods=['GET',", "# from_year = int(request.args.get('from', 2000)) #if request.args.get('locations'): # locations = request.args.get('locations', []) ##if", "25, affils = form.affiliations.data, api_key = form.api_key.data, api_out = False) n_results = sum([author_dict.get('total_count',", "still processing else: return render_template('query_results/processing.html', job_key = job_key), 202 ####### @bp.route('/api/help/', methods =", "= sum([affil_dict['total_count'] for affil_dict in \\ affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\", "\"\", api_key = \"\", api_out = True): timeit_start = time.time() \"\"\"if request.args.get('query'): query", "Counter from geotext import GeoText import time @bp.before_request def before_request(): if current_user.is_authenticated: current_user.last_seen", "form.affiliations.data, form.api_key.data, current_user.username), result_ttl=current_app.config['RESULT_TTL'], timeout=current_app.config['WORKER_TIMEOUT']) flash(f'Your query is running! 
Your ID is :", "= from_year, locations = locations, n_authors = 25, affils = affils, api_key =", "form.validate_on_submit(): current_user.username = form.username.data current_user.about_me = form.about_me.data db.session.commit() flash('Your changes have been saved.')", "data = result.result_all.get('error'), query_text = result.query_text, query_from = result.query_from , query_location = result.query_locations,", "True): timeit_start = time.time() \"\"\"if request.args.get('query'): query = request.args.get('query') if request.args.get('from'): from_year =", "form = EditProfileForm(current_user.username) if form.validate_on_submit(): current_user.username = form.username.data current_user.about_me = form.about_me.data db.session.commit() flash('Your", "query_type, query_text = query_text, query_from = from_year, query_affiliations = affils, query_locations= locations, user_querying", "False) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys())", "jsonify(no_key_dict) else: return no_key_dict out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)", "job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def help(): return {'endpoints' : {'/api/query/author_affils/'", "unique_results = length_of_results), 200 return render_template('make_a_query.html', form=form) @bp.route(\"/results/<job_key>\", methods=['GET']) def get_results(job_key): \"\"\" Results", "25): # n_authors = request.args.get('n', 25) #if request.args.get('affiliations', []): # affils = request.args.get('affiliations',", "query_affiliations = result.query_affiliations) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\ result.result_all.values()]) ###", "locations, user_querying = querying_user, length_of_results = len(obj_dicts.keys()), result_all=obj_dicts ) db.session.add(result) db.session.commit() return result.id", "# query = request.args.get('query') #if request.args.get('from'): # from_year = int(request.args.get('from', 2000)) #if request.args.get('locations'):", "if current_app.config['ASYNC_FUNC']: from app.main.routes import run_query ### If async == True, queue a", "if current_user.is_authenticated: current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST']) @bp.route('/index', methods=['GET', 'POST']) def", "= form.query_text.data, from_year = form.query_from.data, locations = form.locations.data, n_authors = 25, affils =", "import datetime import pandas as pd from collections import Counter from geotext import", "n_results = sum([affil_dict['total_count'] for affil_dict in \\ affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html',", "= False) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\ author_dicts.values()]) length_of_results =", "for affil_dict in \\ affil_dicts.values()]) length_of_results = len(affil_dicts.keys()) return render_template('query_results/affil_papers.html', \\ data =", "{'query' : '', 'from' : '', 'locations' : '', 'n' : ''}, 'info'", "'', 'locations' : '', 'n' : ''}, 'info' : ''} }, 'general_notes' :", "else: return no_key_dict out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end", "results.') if api_out == True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/', 
methods =", "create_app() app.app_context().push() if query_type == 'author_papers': obj_dicts = query_author_papers(query = query_text, from_year =", "= current_user.username form.about_me.data = current_user.about_me return render_template('edit_profile.html', title='Edit Profile', form=form) def run_query(query_type, query_text,", "import login_user, logout_user, current_user, login_required from config import Config from werkzeug.urls import url_parse", "from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.') print(f'`query_affil_papers` for", "= result.query_affiliations) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\ result.result_all.values()]) ### Return", "return render_template('make_a_query.html', form=form) @bp.route(\"/results/<job_key>\", methods=['GET']) def get_results(job_key): \"\"\" Results page for <job_key>. If", "= query_text, from_year = from_year, locations = locations, n_authors = 25, affils =", "different pages for different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data", "for \"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')", "and job.result: result = Result.query.filter_by(id=job.result).first() if result.result_all.get('error'): return render_template('errors/data_error.html', data = result.result_all.get('error'), query_text", "locations = request.args.get('locations', []) ##if request.args.get('n', 25): # n_authors = request.args.get('n', 25) #if", "import * from rq.job import Job from datetime import datetime, timezone from flask", "= affils, query_locations= locations, user_querying = querying_user, length_of_results = len(obj_dicts.keys()), result_all=obj_dicts ) db.session.add(result)", "from datetime import datetime, timezone from flask import render_template, flash, redirect, url_for, request,", "result.query_locations, query_affiliations = result.query_affiliations) n_results = sum([author_dict.get('total_count', 0) for author_dict in \\ result.result_all.values()])", "= result.length_of_results), 200 ### Refresh if job is still processing else: return render_template('query_results/processing.html',", "tables. \"\"\" job = Job.fetch(job_key, connection=current_app.redis) ### Return results if job.is_finished and job.result:", "def run_query(query_type, query_text, \\ from_year, locations, affils, api_key, \\ querying_user): \"\"\" Query data", "n_results, unique_results = result.length_of_results), 200 elif result.query_type == 'author_papers': return render_template('query_results/author_papers.html', \\ data", "`obj_dicts` which is stored in the db. \"\"\" ### Import create_app because this", "datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST']) @bp.route('/index', methods=['GET', 'POST']) def index(): return render_template('index.html') @bp.route('/user/<username>')", "to refresh again. 
When its done, the refresh link will link to the", "timeit_start, api_key) timeit_end = time.time() print(f'`query_author_papers` for \"{query}\" from {from_year} onward ran in", "Job from datetime import datetime, timezone from flask import render_template, flash, redirect, url_for,", "Config from app.main_api_functions import * from rq.job import Job from datetime import datetime,", "ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models import User, Result from app.email import send_password_reset_email from", "link to refresh again. When its done, the refresh link will link to", "Returning results.') if api_out == True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/', methods", "flash('Your changes have been saved.') return redirect(url_for('main.edit_profile')) elif request.method == 'GET': form.username.data =", "location in locations.split(',')] if affils: affils = [affil.strip().lower() for affil in affils.split(',')] if", "render_template('query_results/affil_papers.html', \\ data = affil_dicts, n_results = n_results, unique_results = length_of_results), 200 elif", "user=user) @bp.route('/edit_profile', methods=['GET', 'POST']) @login_required def edit_profile(): form = EditProfileForm(current_user.username) if form.validate_on_submit(): current_user.username", "render_template('query_results/processing.html', job_key = job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def help(): return", "When its done, the refresh link will link to the tables. \"\"\" job", "= result.query_from , query_location = result.query_locations, query_affiliations = result.query_affiliations) n_results = sum([author_dict.get('total_count', 0)", "= job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def help(): return {'endpoints' :", "authorIndexQueryForm from app.models import User, Result from app.email import send_password_reset_email from app.main import", "affils, api_key = api_key, api_out = False) result = Result( query_type = query_type,", "run_query ### If async == True, queue a task with the args from", "before_request(): if current_user.is_authenticated: current_user.last_seen = datetime.datetime.now(timezone.utc) db.session.commit() @bp.route('/', methods=['GET', 'POST']) @bp.route('/index', methods=['GET', 'POST'])", "app.models import User, Result from app.email import send_password_reset_email from app.main import bp from", "different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results", "API key to run your query under!'} if api_out == True: return jsonify(no_key_dict)", "return jsonify(no_key_dict) else: return no_key_dict out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start,", "\"\"\" if query_type == 'author_papers': form = authorIndexQueryForm() elif query_type == 'affil_papers': form", "db from app.main.forms import LoginForm, RegistrationForm, EditProfileForm, \\ ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models", "in affils.split(',')] if not api_key: no_key_dict = {'error' : 'Please supply an API", "api_out = False) n_results = sum([affil_dict['total_count'] for affil_dict in \\ affil_dicts.values()]) length_of_results =", "request.args.get('query'): # query = request.args.get('query') #if request.args.get('from'): # from_year = int(request.args.get('from', 2000)) #if", "obj_dicts = query_author_papers(query = query_text, 
from_year = from_year, locations = locations, n_authors =", "query_type == 'affil_papers': form = authorIndexQueryForm() if form.validate_on_submit(): if current_app.config['ASYNC_FUNC']: from app.main.routes import", "= \"\", from_year = \"\", locations = \"\", n_authors = \"\", affils =", "= request.args.get('query') #if request.args.get('from'): # from_year = int(request.args.get('from', 2000)) #if request.args.get('locations'): # locations", "= time.time() \"\"\"if request.args.get('query'): query = request.args.get('query') if request.args.get('from'): from_year = int(request.args.get('from', 2000))", "form.affiliations.data, api_key = form.api_key.data, api_out = False) n_results = sum([affil_dict['total_count'] for affil_dict in", "= create_app() app.app_context().push() if query_type == 'author_papers': obj_dicts = query_author_papers(query = query_text, from_year", "returned in a nested dictionary and assigned to `obj_dicts` which is stored in", "api_out = True): timeit_start = time.time() \"\"\"if request.args.get('query'): query = request.args.get('query') if request.args.get('from'):", "is running! Your ID is : {job.get_id()}') return get_results(job.get_id()) elif not current_app.config['ASYNC_FUNC']: ###", "'info' : ''}, '/api/query/author_papers/' : {'parameters' : {'query' : '', 'from' : '',", "Result.query.filter_by(id=job.result).first() if result.result_all.get('error'): return render_template('errors/data_error.html', data = result.result_all.get('error'), query_text = result.query_text, query_from =", "n_results = n_results, unique_results = length_of_results), 200 return render_template('make_a_query.html', form=form) @bp.route(\"/results/<job_key>\", methods=['GET']) def", "been saved.') return redirect(url_for('main.edit_profile')) elif request.method == 'GET': form.username.data = current_user.username form.about_me.data =", "if result.result_all.get('error'): return render_template('errors/data_error.html', data = result.result_all.get('error'), query_text = result.query_text, query_from = result.query_from", "n_results = n_results, unique_results = result.length_of_results), 200 ### Refresh if job is still", "\\ data = author_dicts, n_results = n_results, unique_results = length_of_results), 200 return render_template('make_a_query.html',", "import Config from werkzeug.urls import url_parse import itertools import re import ast import", "the link to refresh again. When its done, the refresh link will link", "same page with the link to refresh again. When its done, the refresh", "sum([author_dict.get('total_count', 0) for author_dict in \\ result.result_all.values()]) ### Return different pages for different", "result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results, unique_results", "run your query under!'} if api_out == True: return jsonify(no_key_dict) else: return no_key_dict", "True): timeit_start = time.time() #if request.args.get('query'): # query = request.args.get('query') #if request.args.get('from'): #", "def help(): return {'endpoints' : {'/api/query/author_affils/' : {'parameters' : {'query' : '', 'from'", "import run_query ### If async == True, queue a task with the args", "in \\ author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data = author_dicts, n_results", "pd from collections import Counter from geotext import GeoText import time @bp.before_request def", "db. 
\"\"\" ### Import create_app because this function is run by the worker", "out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end = time.time() print(f'`query_author_papers`", "running! Your ID is : {job.get_id()}') return get_results(job.get_id()) elif not current_app.config['ASYNC_FUNC']: ### Run", "to `obj_dicts` which is stored in the db. \"\"\" ### Import create_app because", "app.main.routes import run_query ### If async == True, queue a task with the", "affils = [affil.strip().lower() for affil in affils.split(',')] if not api_key: no_key_dict = {'error'", "for <job_key>. If job is still running, this will redirect to the same", "time.time() #if request.args.get('query'): # query = request.args.get('query') #if request.args.get('from'): # from_year = int(request.args.get('from',", "= \"\", api_key = \"\", api_out = True): timeit_start = time.time() \"\"\"if request.args.get('query'):", "= time.time() print(f'`query_author_papers` for \"{query}\" from {from_year} onward ran in {round(timeit_end - timeit_start,4)}", "else: return no_key_dict out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end", "== 'GET': form.username.data = current_user.username form.about_me.data = current_user.about_me return render_template('edit_profile.html', title='Edit Profile', form=form)", "Return different pages for different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\", "query_text, \\ from_year, locations, affils, api_key, \\ querying_user): \"\"\" Query data is returned", "### Import create_app because this function is run by the worker from app", "len(obj_dicts.keys()), result_all=obj_dicts ) db.session.add(result) db.session.commit() return result.id @bp.route('/query/<query_type>', methods=['GET', 'POST']) @login_required def make_a_query(query_type):", "result.result_all.values()]) ### Return different pages for different queries if result.query_type == 'affil_papers': return", "from_year, locations, affils, n_authors, timeit_start, api_key) timeit_end = time.time() #print(f'`author_papers_w_location` for \"{query}\" from", "else: return render_template('query_results/processing.html', job_key = job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def", "True: return jsonify(out_dict) else: return out_dict @bp.route('/api/query/affil_papers/', methods = ['GET']) def query_affil_papers(query =", "render_template('edit_profile.html', title='Edit Profile', form=form) def run_query(query_type, query_text, \\ from_year, locations, affils, api_key, \\", "if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results,", "== 'affil_papers': affil_dicts = query_affil_papers(query = form.query_text.data, from_year = form.query_from.data, locations = form.locations.data,", "if not api_key: no_key_dict = {'error' : 'Please supply an API key to", "def make_a_query(query_type): \"\"\" \"\"\" if query_type == 'author_papers': form = authorIndexQueryForm() elif query_type", "user = User.query.filter_by(username=username).first_or_404() return render_template('user.html', user=user) @bp.route('/edit_profile', methods=['GET', 'POST']) @login_required def edit_profile(): form", "affils = form.affiliations.data, api_key = form.api_key.data, api_out = False) n_results = sum([affil_dict['total_count'] for", "request.args.get('n', 25) 
if request.args.get('affiliations', []): affils = request.args.get('affiliations', []) if request.args.get('api_key'): api_key =", "jsonify,current_app from flask_login import login_user, logout_user, current_user, login_required from config import Config from", "from app.main import bp from config import Config from app.main_api_functions import * from", "= locations, n_authors = 25, affils = affils, api_key = api_key, api_out =", "make_a_query(query_type): \"\"\" \"\"\" if query_type == 'author_papers': form = authorIndexQueryForm() elif query_type ==", "True: return jsonify(no_key_dict) else: return no_key_dict out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors,", "if request.args.get('from'): from_year = int(request.args.get('from', 2000)) if request.args.get('locations'): locations = request.args.get('locations', []) if", "result = Result( query_type = query_type, query_text = query_text, query_from = from_year, query_affiliations", "api_key, api_out = False) elif query_type == 'affil_papers': obj_dicts = query_affil_papers(query = query_text,", "api_key) timeit_end = time.time() print(f'`query_author_papers` for \"{query}\" from {from_year} onward ran in {round(timeit_end", "import bp from config import Config from app.main_api_functions import * from rq.job import", "= Result( query_type = query_type, query_text = query_text, query_from = from_year, query_affiliations =", "api_out == True: return jsonify(no_key_dict) else: return no_key_dict out_dict = query_author_papers_data(query, from_year, locations,", ": '', 'n' : ''}, 'info' : ''} }, 'general_notes' : '<NAME>'} @bp.route('/api/query/author_papers/',", "= current_app.task_queue.enqueue_call( func=run_query, args=(query_type, form.query_text.data, form.query_from.data, form.locations.data, form.affiliations.data, form.api_key.data, current_user.username), result_ttl=current_app.config['RESULT_TTL'], timeout=current_app.config['WORKER_TIMEOUT']) flash(f'Your", "result.query_text, query_from = result.query_from , query_location = result.query_locations, query_affiliations = result.query_affiliations) n_results =", "\\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200 elif result.query_type", "= affils, api_key = api_key, api_out = False) result = Result( query_type =", "locations, affils, api_key, \\ querying_user): \"\"\" Query data is returned in a nested", "for different queries if result.query_type == 'affil_papers': return render_template('query_results/affil_papers.html', \\ data = result.result_all,", "request.args.get('query'): query = request.args.get('query') if request.args.get('from'): from_year = int(request.args.get('from', 2000)) if request.args.get('locations'): locations", "else: return out_dict @bp.route('/api/query/affil_papers/', methods = ['GET']) def query_affil_papers(query = \"\", from_year =", "== 'author_papers': author_dicts = query_author_papers(query = form.query_text.data, from_year = form.query_from.data, locations = form.locations.data,", "re import ast import datetime import pandas as pd from collections import Counter", "jsonify(no_key_dict) else: return no_key_dict out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)", "form.username.data = current_user.username form.about_me.data = current_user.about_me return render_template('edit_profile.html', title='Edit Profile', form=form) def run_query(query_type,", "return render_template('query_results/processing.html', 
job_key = job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def help():", "current_app.config['ASYNC_FUNC']: from app.main.routes import run_query ### If async == True, queue a task", "job_key = job_key), 202 ####### @bp.route('/api/help/', methods = ['GET']) def help(): return {'endpoints'", "@bp.route('/index', methods=['GET', 'POST']) def index(): return render_template('index.html') @bp.route('/user/<username>') @login_required def user(username): user =", "= result.result_all, n_results = n_results, unique_results = result.length_of_results), 200 elif result.query_type == 'author_papers':", "200 ### Refresh if job is still processing else: return render_template('query_results/processing.html', job_key =", "run by the worker from app import create_app from app.models import Result app", "data = affil_dicts, n_results = n_results, unique_results = length_of_results), 200 elif query_type ==", "affils = \"\", api_key = \"\", api_out = True): timeit_start = time.time() \"\"\"if", "'POST']) @login_required def edit_profile(): form = EditProfileForm(current_user.username) if form.validate_on_submit(): current_user.username = form.username.data current_user.about_me", "author_dicts.values()]) length_of_results = len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data = author_dicts, n_results = n_results,", "== 'affil_papers': form = authorIndexQueryForm() if form.validate_on_submit(): if current_app.config['ASYNC_FUNC']: from app.main.routes import run_query", "from_year, query_affiliations = affils, query_locations= locations, user_querying = querying_user, length_of_results = len(obj_dicts.keys()), result_all=obj_dicts", "timeit_start = time.time() #if request.args.get('query'): # query = request.args.get('query') #if request.args.get('from'): # from_year", "under!'} if api_out == True: return jsonify(no_key_dict) else: return no_key_dict out_dict = query_affil_papers_data(query,", "return render_template('query_results/affil_papers.html', \\ data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200", "Return results if job.is_finished and job.result: result = Result.query.filter_by(id=job.result).first() if result.result_all.get('error'): return render_template('errors/data_error.html',", "'GET': form.username.data = current_user.username form.about_me.data = current_user.about_me return render_template('edit_profile.html', title='Edit Profile', form=form) def", ": '', 'from' : '', 'locations' : '', 'n' : ''}, 'info' :", "methods = ['GET']) def query_affil_papers(query = \"\", from_year = \"\", locations = \"\",", "202 ####### @bp.route('/api/help/', methods = ['GET']) def help(): return {'endpoints' : {'/api/query/author_affils/' :", "in {round(timeit_end - timeit_start,4)} seconds. Returning results.') if api_out == True: return jsonify(out_dict)", "#if request.args.get('from'): # from_year = int(request.args.get('from', 2000)) #if request.args.get('locations'): # locations = request.args.get('locations',", "form.locations.data, n_authors = 25, affils = form.affiliations.data, api_key = form.api_key.data, api_out = False)", "{round(timeit_end - timeit_start,4)} seconds. 
Returning results.') print(f'`query_affil_papers` for \"{query}\" from {from_year} onward ran", "len(author_dicts.keys()) return render_template('query_results/author_papers.html', \\ data = author_dicts, n_results = n_results, unique_results = length_of_results),", "True, queue a task with the args from the form job = current_app.task_queue.enqueue_call(", "200 elif query_type == 'author_papers': author_dicts = query_author_papers(query = form.query_text.data, from_year = form.query_from.data,", "api_key = request.args.get('api_key') if locations: locations = [location.strip().lower() for location in locations.split(',')] if", "request.args.get('affiliations', []) #if request.args.get('api_key'): # api_key = request.args.get('api_key') if locations: locations = [location.strip().lower()", "\\ ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm from app.models import User, Result from app.email import send_password_reset_email", "job.result: result = Result.query.filter_by(id=job.result).first() if result.result_all.get('error'): return render_template('errors/data_error.html', data = result.result_all.get('error'), query_text =" ]
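The view functions above reduce the nested result dictionary to two numbers before rendering: a total hit count summed over the 'total_count' field and the number of unique keys. A minimal sketch of that aggregation, using a hypothetical two-entry result dict (real results come from query_author_papers_data / query_affil_papers_data):

# Hypothetical result dict; only the 'total_count' field used by the views is shown.
author_dicts = {
    "author_a": {"total_count": 12},
    "author_b": {"total_count": 7},
}
n_results = sum(author_dict.get("total_count", 0) for author_dict in author_dicts.values())
unique_results = len(author_dicts.keys())
print(n_results, unique_results)  # -> 19 2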
[ "= cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for", "img) cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image", "images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec - (time()-time_begin)", "cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder) if", "os from time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def", "scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1)", "scale and scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name,", "x.endswith('.jpg')] for image in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep", "from time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img,", "img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x", "interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder)", "image in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec", "= time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec - (time()-time_begin) if secs_to_sleep>0:", "cv2 import sys import os from time import time, sleep imagesFolder = sys.argv[1]", "= sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name = \"image\", scale = 1.0,", "frameIntervalSec = 1.0/10 def show_image(img, name = \"image\", scale = 1.0, newsize =", "time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name = \"image\",", "newsize = None): if scale and scale != 1.0: img = cv2.resize(img, newsize,", "and scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img)", "x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin = time() img", "show_image(img, name = \"image\", scale = 1.0, newsize = None): if scale and", "for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin = time()", "cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')]", "1.0, newsize = None): if scale and scale != 1.0: img = cv2.resize(img,", "= 1.0/10 def show_image(img, name = \"image\", scale = 1.0, newsize = None):", "1.0/10 def show_image(img, name = \"image\", scale = 1.0, newsize = None): if", "scale = 1.0, newsize = None): if scale and scale != 1.0: img", "in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec -", "time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec - (time()-time_begin) if", "if x.endswith('.jpg')] for 
image in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img)", "import time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name =", "os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image))", "= None): if scale and scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC)", "sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name = \"image\", scale = 1.0, newsize", "sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name = \"image\", scale", "if scale and scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)", "\"image\", scale = 1.0, newsize = None): if scale and scale != 1.0:", "[x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin =", "sys import os from time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec =", "for image in images_in_folder: time_begin = time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep =", "def show_image(img, name = \"image\", scale = 1.0, newsize = None): if scale", "= 1.0, newsize = None): if scale and scale != 1.0: img =", "import cv2 import sys import os from time import time, sleep imagesFolder =", "newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x in", "name = \"image\", scale = 1.0, newsize = None): if scale and scale", "<reponame>YingshuLu/self-driving-formula-racing<filename>auto_drive/rule_drive/ReplayRace.py import cv2 import sys import os from time import time, sleep imagesFolder", "import sys import os from time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec", "= [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin", "in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in images_in_folder: time_begin = time() img =", "cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for", "cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder = [x for x", "time() img = cv2.imread(os.path.join(imagesFolder,image)) show_image(img) secs_to_sleep = frameIntervalSec - (time()-time_begin) if secs_to_sleep>0: sleep(secs_to_sleep)", "cv2.waitKey(1) images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in", "= \"image\", scale = 1.0, newsize = None): if scale and scale !=", "None): if scale and scale != 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name,", "time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name", "!= 1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder", "imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10 def show_image(img, name = \"image\", scale =", "1.0: img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC) cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE) cv2.imshow(name, img) cv2.waitKey(1) images_in_folder =", "images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')] for image in 
images_in_folder:", "import os from time import time, sleep imagesFolder = sys.argv[1] frameIntervalSec = 1.0/10" ]
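The replay script is driven entirely by its first command-line argument, e.g. python ReplayRace.py /path/to/recorded_frames (the folder path here is a placeholder), and shows every .jpg at roughly 10 frames per second. A minimal sketch of the frame-pacing arithmetic it relies on, with a hypothetical per-frame processing time:

frameIntervalSec = 1.0 / 10       # target interval for ~10 frames per second
elapsed = 0.037                   # hypothetical time spent reading and showing one frame
secs_to_sleep = frameIntervalSec - elapsed
if secs_to_sleep > 0:             # sleep only when the frame finished early
    print(f"sleep for {secs_to_sleep:.3f} s")  # -> sleep for 0.063 s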
[ "of the task to the QP objective is: .. math:: \\\\| J \\\\Delta", "is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of the robot.", "\"\"\" Regulate joint angles to a desired posture, *i.e.* a vector of actuated", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "language governing permissions and # limitations under the License. \"\"\" Posture task specification.", "configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective( self, configuration: Configuration ) ->", "Create task. Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note:", "of its contact frames become singular, the posture task will drive the knees", "error) def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the", "and normalizes task coordinates to the same unit. The unit of the overall", "vector in the configuration space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration:", "\\\\times n}`, with :math:`n` the dimension of the robot's tangent space, and the", "space. A posture task is typically used for regularization as it has a", "utf-8 -*- # # Copyright 2022 <NAME> # # Licensed under the Apache", "this file except in compliance with the License. # You may obtain a", ":math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in the configuration space. A posture task", "= configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain * error)", "vector :math:`\\\\alpha e(q)` such that the task dynamics are: .. math:: J(q) \\\\Delta", "such that the task dynamics are: .. math:: J(q) \\\\Delta q = \\\\alpha", "and configuration. Returns: Pair :math:`(H, c)` of Hessian matrix and linear vector of", "return (H, c) def __repr__(self): \"\"\" Human-readable representation of the task. \"\"\" return", "Args: target_q: Target vector in the configuration space. \"\"\" self.target_q = target_q.copy() def", "instance, when Upkie's legs are stretched and the Jacobian of its contact frames", "Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha", "Returns: Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP", "configuration space. A posture task is typically used for regularization as it has", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "ANY KIND, either express or implied. # See the License for the specific", "assume that the first seven coordinates of the configuration are for the floating", "cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in the configuration space. A", "a vector of actuated joint angles. Floating base coordinates are not affected by", "target pose in the world frame. Args: target_q: Target vector in the configuration", "the Hessian matrix :math:`H` and linear vector :math:`c` such that the contribution of", "under the License. \"\"\" Posture task specification. \"\"\" from typing import Optional, Tuple", "the configuration space. 
\"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration )", "jacobian = configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain *", "(jacobian, self.gain * error) def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]:", "weighted_error = self.cost * error # [cost] H = weighted_jacobian.T @ weighted_jacobian c", "base joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return (jacobian,", "dimension of the robot's tangent space, and the error vector is :math:`e(q) \\\\in", "* error # [cost] H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @", "configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot", "QP objective is: .. math:: \\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of", "is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of the robot's", "task. Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target", "= None def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task target pose", "desired posture, *i.e.* a vector of actuated joint angles. Floating base coordinates are", "compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix", "numpy as np from ..configuration import Configuration from .exceptions import TargetNotSet from .task", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "permissions and # limitations under the License. \"\"\" Posture task specification. \"\"\" from", "cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\" Create task.", "frame. \"\"\" if self.target_q is None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle", "specification. \"\"\" from typing import Optional, Tuple import numpy as np from ..configuration", "-*- # # Copyright 2022 <NAME> # # Licensed under the Apache License,", "OF ANY KIND, either express or implied. # See the License for the", "frames become singular, the posture task will drive the knees toward a preferred", "\"\"\" Compute the Hessian matrix :math:`H` and linear vector :math:`c` such that the", "configuration. Returns: Pair :math:`(H, c)` of Hessian matrix and linear vector of the", "the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the task dynamics are:", ") -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and linear vector", "in the configuration space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration", "objective is: .. math:: \\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 =", "rank. For instance, when Upkie's legs are stretched and the Jacobian of its", "a commanded velocity). Args: robot: Robot model and configuration. Returns: Pair :math:`(H, c)`", "-> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and linear vector :math:`c`", "np.ndarray) -> None: \"\"\" Set task target pose in the world frame. Args:", "the body frame. 
\"\"\" if self.target_q is None: raise TargetNotSet(\"no posture target\") #", "None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models without floating base joint", "TargetNotSet(\"no posture target\") # TODO(scaron): handle models without floating base joint jacobian =", "to the same unit. The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The", "Args: robot: Robot model and configuration. Returns: Pair :math:`(H, c)` of Hessian matrix", "e)` of Jacobian matrix and error vector, both expressed in the body frame.", "See :func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot configuration to read kinematics from.", "*i.e.* a vector of actuated joint angles. Floating base coordinates are not affected", "e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T q The", "Robot model and configuration. Returns: Pair :math:`(H, c)` of Hessian matrix and linear", "weighted_jacobian = self.cost * jacobian # [cost] weighted_error = self.cost * error #", "configuration space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) ->", "weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates to", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "c) def __repr__(self): \"\"\" Human-readable representation of the task. \"\"\" return f\"PostureTask(cost={self.cost}, gain={self.gain})\"", "The configuration displacement :math:`\\\\Delta q` is the output of inverse kinematics (we divide", "the QP objective is: .. math:: \\\\| J \\\\Delta q - \\\\alpha e", "it has a steady rank. For instance, when Upkie's legs are stretched and", "# Copyright 2022 <NAME> # # Licensed under the Apache License, Version 2.0", "jacobian # [cost] weighted_error = self.cost * error # [cost] H = weighted_jacobian.T", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "configuration to read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix", "kinematics (we divide it by :math:`\\\\Delta t` to get a commanded velocity). Args:", "target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\" Create task. Args: cost:", "__init__(self, cost: float) -> None: \"\"\" Create task. Args: cost: joint angular error", "= -weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable representation of", "= weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H, c) def", "unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is", "task specification. \"\"\" from typing import Optional, Tuple import numpy as np from", "are for the floating base. \"\"\" self.cost = cost self.target_q = None def", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "math:: J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in", "Target vector in the configuration space. 
A posture task is typically used for", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "\\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of the robot's tangent space,", "2022 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "- \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T", "\\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for", "q = \\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`,", "required by applicable law or agreed to in writing, software # distributed under", "task dynamics are: .. math:: J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian", "-weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable representation of the", "divide it by :math:`\\\\Delta t` to get a commanded velocity). Args: robot: Robot", "applicable law or agreed to in writing, software # distributed under the License", "* error) def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute", "Jacobian matrix and error vector, both expressed in the body frame. \"\"\" if", "self.target_q is None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models without floating", "of actuated joint angles. Floating base coordinates are not affected by this task.", "Hessian matrix and linear vector of the QP objective. \"\"\" jacobian, error =", "in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in the configuration space. A posture", "in the configuration space. A posture task is typically used for regularization as", "stretched and the Jacobian of its contact frames become singular, the posture task", "vector, both expressed in the body frame. \"\"\" if self.target_q is None: raise", "@ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self): \"\"\"", "the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more context. Args: configuration:", "raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models without floating base joint jacobian", "or agreed to in writing, software # distributed under the License is distributed", "the same unit. The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration", "np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the", "\\\\Delta q = \\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times", "H \\\\Delta q + c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times", "as np from ..configuration import Configuration from .exceptions import TargetNotSet from .task import", "[\\\\mathrm{rad}]`. target_q: Target vector in the configuration space. A posture task is typically", "of Jacobian matrix and error vector, both expressed in the body frame. \"\"\"", "coordinates to the same unit. The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`.", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "float) -> None: \"\"\" Create task. Args: cost: joint angular error cost in", "/ [\\\\mathrm{rad}]`. target_q: Target vector in the configuration space. 
A posture task is", "\\\\Delta q^T H \\\\Delta q + c^T q The weight matrix :math:`W \\\\in", "commanded velocity). Args: robot: Robot model and configuration. Returns: Pair :math:`(H, c)` of", "\\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics`", "floating base. \"\"\" self.cost = cost self.target_q = None def set_target(self, target_q: np.ndarray)", "Hessian matrix :math:`H` and linear vector :math:`c` such that the contribution of the", "on the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more context. Args:", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "task target pose in the world frame. Args: target_q: Target vector in the", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "from .exceptions import TargetNotSet from .task import Task class PostureTask(Task): \"\"\" Regulate joint", "angles to a desired posture, *i.e.* a vector of actuated joint angles. Floating", "seven coordinates of the configuration are for the floating base. \"\"\" self.cost =", "space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray,", "= self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost] weighted_error = self.cost *", "= self.cost * error # [cost] H = weighted_jacobian.T @ weighted_jacobian c =", "the robot's tangent space, and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both", "* jacobian # [cost] weighted_error = self.cost * error # [cost] H =", "compliance with the License. # You may obtain a copy of the License", "def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix", "of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is the", "- configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective( self, configuration: Configuration )", "normalizes task coordinates to the same unit. The unit of the overall contribution", "self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and", "are not affected by this task. Attributes: cost: joint angular error cost in", "affected by this task. Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] /", "TargetNotSet from .task import Task class PostureTask(Task): \"\"\" Regulate joint angles to a", "singular, the posture task will drive the knees toward a preferred orientation. \"\"\"", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", ".exceptions import TargetNotSet from .task import Task class PostureTask(Task): \"\"\" Regulate joint angles", "J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta", "in the world frame. Args: target_q: Target vector in the configuration space. \"\"\"", "matrix and error vector, both expressed in the body frame. 
\"\"\" if self.target_q", "configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and", "inverse kinematics (we divide it by :math:`\\\\Delta t` to get a commanded velocity).", "matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the task dynamics are: ..", "Posture task specification. \"\"\" from typing import Optional, Tuple import numpy as np", "the first seven coordinates of the configuration are for the floating base. \"\"\"", "(H, c) def __repr__(self): \"\"\" Human-readable representation of the task. \"\"\" return f\"PostureTask(cost={self.cost},", "not use this file except in compliance with the License. # You may", "\"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost] weighted_error", "Regulate joint angles to a desired posture, *i.e.* a vector of actuated joint", "import numpy as np from ..configuration import Configuration from .exceptions import TargetNotSet from", "vector in the configuration space. A posture task is typically used for regularization", ":math:`(J, \\\\alpha e)` of Jacobian matrix and error vector, both expressed in the", "joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the", "target_q: Target vector in the configuration space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics(", "= self.cost * jacobian # [cost] weighted_error = self.cost * error # [cost]", "License, Version 2.0 (the \"License\"); # you may not use this file except", "joint angles. Floating base coordinates are not affected by this task. Attributes: cost:", ":math:`n` the dimension of the robot's tangent space, and the error vector is", "\"\"\" if self.target_q is None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "contact frames become singular, the posture task will drive the knees toward a", "objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost]", "robot: Robot model and configuration. Returns: Pair :math:`(H, c)` of Hessian matrix and", "used for regularization as it has a steady rank. For instance, when Upkie's", "by :math:`\\\\Delta t` to get a commanded velocity). Args: robot: Robot model and", "contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is the output of inverse", "is typically used for regularization as it has a steady rank. For instance,", "Args: configuration: Robot configuration to read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)`", ":math:`\\\\Delta q` is the output of inverse kinematics (we divide it by :math:`\\\\Delta", "error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in the configuration space.", "configuration: Robot configuration to read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of", "with :math:`n` the dimension of the robot's tangent space, and the error vector", "the knees toward a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self,", "# you may not use this file except in compliance with the License.", "float target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\" Create task. Args:", "Set task target pose in the world frame. 
Args: target_q: Target vector in", "the dimension of the robot's tangent space, and the error vector is :math:`e(q)", ":func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot configuration to read kinematics from. Returns:", "agreed to in writing, software # distributed under the License is distributed on", "n}` weighs and normalizes task coordinates to the same unit. The unit of", "the configuration space. A posture task is typically used for regularization as it", ":math:`\\\\Delta t` to get a commanded velocity). Args: robot: Robot model and configuration.", "self.cost * error # [cost] H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T", "the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is the output", "(the \"License\"); # you may not use this file except in compliance with", "will drive the knees toward a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray]", ":] error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective(", "the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q`", "error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective( self,", "Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume", "Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and error vector, both expressed in", "self.cost = cost self.target_q = None def set_target(self, target_q: np.ndarray) -> None: \"\"\"", "# Unless required by applicable law or agreed to in writing, software #", "\\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates to the same unit. The", "for regularization as it has a steady rank. For instance, when Upkie's legs", "by applicable law or agreed to in writing, software # distributed under the", "None: \"\"\" Set task target pose in the world frame. Args: target_q: Target", "c = -weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable representation", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "the Jacobian of its contact frames become singular, the posture task will drive", "vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of the", "of the robot. See :func:`Task.compute_task_dynamics` for more context. 
Args: configuration: Robot configuration to", "from typing import Optional, Tuple import numpy as np from ..configuration import Configuration", "\"\"\" from typing import Optional, Tuple import numpy as np from ..configuration import", "such that the contribution of the task to the QP objective is: ..", ".task import Task class PostureTask(Task): \"\"\" Regulate joint angles to a desired posture,", "(self.target_q - configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective( self, configuration: Configuration", "q + c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs", "matrix :math:`H` and linear vector :math:`c` such that the contribution of the task", "# [cost] H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return", "self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\"", "return (jacobian, self.gain * error) def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray,", "overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is the output of", ":math:`(H, c)` of Hessian matrix and linear vector of the QP objective. \"\"\"", "joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in the", "(we divide it by :math:`\\\\Delta t` to get a commanded velocity). Args: robot:", "cost: float) -> None: \"\"\" Create task. Args: cost: joint angular error cost", "file except in compliance with the License. # You may obtain a copy", "limitations under the License. \"\"\" Posture task specification. \"\"\" from typing import Optional,", "jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost] weighted_error =", "Upkie's legs are stretched and the Jacobian of its contact frames become singular,", "License for the specific language governing permissions and # limitations under the License.", "of the configuration are for the floating base. \"\"\" self.cost = cost self.target_q", "to in writing, software # distributed under the License is distributed on an", "for the floating base. \"\"\" self.cost = cost self.target_q = None def set_target(self,", "cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the first seven coordinates", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "not affected by this task. Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}]", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", ".. math:: J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian matrix is :math:`J(q)", "task coordinates to the same unit. The unit of the overall contribution is", "that the task dynamics are: .. math:: J(q) \\\\Delta q = \\\\alpha e(q)", "import TargetNotSet from .task import Task class PostureTask(Task): \"\"\" Regulate joint angles to", "target_q: np.ndarray) -> None: \"\"\" Set task target pose in the world frame.", "of inverse kinematics (we divide it by :math:`\\\\Delta t` to get a commanded", "\\\\Delta q + c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}`", ":math:`\\\\alpha e(q)` such that the task dynamics are: .. math:: J(q) \\\\Delta q", "the posture task will drive the knees toward a preferred orientation. 
\"\"\" cost:", "regularization as it has a steady rank. For instance, when Upkie's legs are", "are stretched and the Jacobian of its contact frames become singular, the posture", "orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\"", "self.gain * error) def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\"", "or implied. # See the License for the specific language governing permissions and", "in the body frame. \"\"\" if self.target_q is None: raise TargetNotSet(\"no posture target\")", "We assume that the first seven coordinates of the configuration are for the", "= \\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with", "self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost] weighted_error = self.cost * error", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "# # Copyright 2022 <NAME> # # Licensed under the Apache License, Version", "output of inverse kinematics (we divide it by :math:`\\\\Delta t` to get a", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2022 <NAME> #", "matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates to the", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "posture task is typically used for regularization as it has a steady rank.", "self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H`", "in writing, software # distributed under the License is distributed on an \"AS", "depend on the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more context.", "matrix and linear vector of the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration)", "target\") # TODO(scaron): handle models without floating base joint jacobian = configuration.tangent.eye[6:, :]", "the contribution of the task to the QP objective is: .. math:: \\\\|", ":math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of the robot's tangent", "target_q: Target vector in the configuration space. A posture task is typically used", ":math:`c` such that the contribution of the task to the QP objective is:", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "Note: We assume that the first seven coordinates of the configuration are for", "body frame. \"\"\" if self.target_q is None: raise TargetNotSet(\"no posture target\") # TODO(scaron):", "Task class PostureTask(Task): \"\"\" Regulate joint angles to a desired posture, *i.e.* a", "from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and error vector, both", "cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that", "base coordinates are not affected by this task. Attributes: cost: joint angular error", "of Hessian matrix and linear vector of the QP objective. \"\"\" jacobian, error", ":math:`[\\\\mathrm{cost}]^2`. 
The configuration displacement :math:`\\\\Delta q` is the output of inverse kinematics (we", "A posture task is typically used for regularization as it has a steady", "[\\\\mathrm{rad}]`. Note: We assume that the first seven coordinates of the configuration are", "Tuple import numpy as np from ..configuration import Configuration from .exceptions import TargetNotSet", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "this task. Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q:", "self.cost * jacobian # [cost] weighted_error = self.cost * error # [cost] H", "the specific language governing permissions and # limitations under the License. \"\"\" Posture", "error # [cost] H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian", "linear vector of the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian =", "Floating base coordinates are not affected by this task. Attributes: cost: joint angular", "Compute the Hessian matrix :math:`H` and linear vector :math:`c` such that the contribution", ":math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of the robot. See", "self.target_q = None def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task target", "coordinates are not affected by this task. Attributes: cost: joint angular error cost", "error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian # [cost] weighted_error = self.cost", "configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector", "q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task", "Optional, Tuple import numpy as np from ..configuration import Configuration from .exceptions import", "c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes", "= cost self.target_q = None def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set", "more context. Args: configuration: Robot configuration to read kinematics from. Returns: Pair :math:`(J,", "\"\"\" Create task. Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`.", "from .task import Task class PostureTask(Task): \"\"\" Regulate joint angles to a desired", "e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the", "use this file except in compliance with the License. # You may obtain", "-*- coding: utf-8 -*- # # Copyright 2022 <NAME> # # Licensed under", "for the specific language governing permissions and # limitations under the License. \"\"\"", "first seven coordinates of the configuration are for the floating base. \"\"\" self.cost", "# -*- coding: utf-8 -*- # # Copyright 2022 <NAME> # # Licensed", "[cost] H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain", "angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. 
target_q: Target vector in the configuration", ":math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates to the same", "governing permissions and # limitations under the License. \"\"\" Posture task specification. \"\"\"", "2.0 (the \"License\"); # you may not use this file except in compliance", "floating base joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return", "Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and linear vector :math:`c` such", "TODO(scaron): handle models without floating base joint jacobian = configuration.tangent.eye[6:, :] error =", "\"\"\" Posture task specification. \"\"\" from typing import Optional, Tuple import numpy as", "to the QP objective is: .. math:: \\\\| J \\\\Delta q - \\\\alpha", "models without floating base joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q -", "# TODO(scaron): handle models without floating base joint jacobian = configuration.tangent.eye[6:, :] error", "is None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models without floating base", "\\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T q The weight matrix :math:`W", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "Both depend on the configuration :math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more", "robot. See :func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot configuration to read kinematics", "the task to the QP objective is: .. math:: \\\\| J \\\\Delta q", "robot's tangent space, and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend", "# # Unless required by applicable law or agreed to in writing, software", "preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None:", "to read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and", "express or implied. # See the License for the specific language governing permissions", "get a commanded velocity). Args: robot: Robot model and configuration. Returns: Pair :math:`(H,", "velocity). Args: robot: Robot model and configuration. Returns: Pair :math:`(H, c)` of Hessian", ") -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)`", "either express or implied. # See the License for the specific language governing", "+ c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and", "import Optional, Tuple import numpy as np from ..configuration import Configuration from .exceptions", "the License. \"\"\" Posture task specification. \"\"\" from typing import Optional, Tuple import", "[cost] weighted_error = self.cost * error # [cost] H = weighted_jacobian.T @ weighted_jacobian", "License. \"\"\" Posture task specification. 
\"\"\" from typing import Optional, Tuple import numpy", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "For instance, when Upkie's legs are stretched and the Jacobian of its contact", "become singular, the posture task will drive the knees toward a preferred orientation.", "and linear vector :math:`c` such that the contribution of the task to the", "\\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T q The weight", "J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n", "are: .. math:: J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian matrix is", "a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float) ->", "task will drive the knees toward a preferred orientation. \"\"\" cost: float target_q:", "import Task class PostureTask(Task): \"\"\" Regulate joint angles to a desired posture, *i.e.*", "knees toward a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost:", "and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration", "cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector in", "in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the first seven coordinates of", "the License. # You may obtain a copy of the License at #", "-> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such", "linear vector :math:`c` such that the contribution of the task to the QP", "Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the task dynamics", "posture target\") # TODO(scaron): handle models without floating base joint jacobian = configuration.tangent.eye[6:,", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "joint angles to a desired posture, *i.e.* a vector of actuated joint angles.", "a steady rank. For instance, when Upkie's legs are stretched and the Jacobian", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "when Upkie's legs are stretched and the Jacobian of its contact frames become", "None: \"\"\" Create task. Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] /", "= \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T q The weight matrix", "specific language governing permissions and # limitations under the License. \"\"\" Posture task", "# limitations under the License. \"\"\" Posture task specification. \"\"\" from typing import", "it by :math:`\\\\Delta t` to get a commanded velocity). Args: robot: Robot model", "task is typically used for regularization as it has a steady rank. For", "n}`, with :math:`n` the dimension of the robot's tangent space, and the error", "q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q +", "compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)`", "Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\" Create task. Args: cost: joint", "\\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q + c^T q", "q` is the output of inverse kinematics (we divide it by :math:`\\\\Delta t`", "The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. 
The configuration displacement :math:`\\\\Delta q`", "\\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of the robot's tangent space, and", "coding: utf-8 -*- # # Copyright 2022 <NAME> # # Licensed under the", "np from ..configuration import Configuration from .exceptions import TargetNotSet from .task import Task", "context. Args: configuration: Robot configuration to read kinematics from. Returns: Pair :math:`(J, \\\\alpha", "q^T H \\\\Delta q + c^T q The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n", "with the License. # You may obtain a copy of the License at", "that the contribution of the task to the QP objective is: .. math::", "Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP objective.", "pose in the world frame. Args: target_q: Target vector in the configuration space.", ".. math:: \\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta", "read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and error", "vector of the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost", "-> None: \"\"\" Create task. Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}]", "and error vector, both expressed in the body frame. \"\"\" if self.target_q is", "of the robot's tangent space, and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`.", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "Configuration from .exceptions import TargetNotSet from .task import Task class PostureTask(Task): \"\"\" Regulate", "cost self.target_q = None def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task", "the robot. See :func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot configuration to read", "math:: \\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T", "\"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]:", "vector of actuated joint angles. Floating base coordinates are not affected by this", "and the Jacobian of its contact frames become singular, the posture task will", "kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and error vector,", "import Configuration from .exceptions import TargetNotSet from .task import Task class PostureTask(Task): \"\"\"", "unit. The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta", "error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the first seven", "Copyright 2022 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "law or agreed to in writing, software # distributed under the License is", "/ [\\\\mathrm{rad}]`. Note: We assume that the first seven coordinates of the configuration", "the License for the specific language governing permissions and # limitations under the", "The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension", "configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:] return (jacobian, self.gain * error) def", "has a steady rank. 
For instance, when Upkie's legs are stretched and the", "python3 # -*- coding: utf-8 -*- # # Copyright 2022 <NAME> # #", "# [cost] weighted_error = self.cost * error # [cost] H = weighted_jacobian.T @", "= (self.target_q - configuration.q)[7:] return (jacobian, self.gain * error) def compute_qp_objective( self, configuration:", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Jacobian of its contact frames become singular, the posture task will drive the", "handle models without floating base joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q", "weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self):", "..configuration import Configuration from .exceptions import TargetNotSet from .task import Task class PostureTask(Task):", "\\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H", "if self.target_q is None: raise TargetNotSet(\"no posture target\") # TODO(scaron): handle models without", "the output of inverse kinematics (we divide it by :math:`\\\\Delta t` to get", "angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the first", "@ weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable representation of the task.", "e(q)` such that the task dynamics are: .. math:: J(q) \\\\Delta q =", "its contact frames become singular, the posture task will drive the knees toward", "t` to get a commanded velocity). Args: robot: Robot model and configuration. Returns:", "def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task target pose in the", "target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the", "is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement :math:`\\\\Delta q` is the output of inverse kinematics", "displacement :math:`\\\\Delta q` is the output of inverse kinematics (we divide it by", "in compliance with the License. # You may obtain a copy of the", "without floating base joint jacobian = configuration.tangent.eye[6:, :] error = (self.target_q - configuration.q)[7:]", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the floating base. \"\"\" self.cost = cost self.target_q = None def set_target(self, target_q:", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "configuration are for the floating base. \"\"\" self.cost = cost self.target_q = None", "that the first seven coordinates of the configuration are for the floating base.", "Robot configuration to read kinematics from. Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= target_q.copy() def compute_task_dynamics( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute", ":math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We assume that the first seven coordinates of the", "as it has a steady rank. For instance, when Upkie's legs are stretched", "None def set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task target pose in", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "-> None: \"\"\" Set task target pose in the world frame. 
Args: target_q:", ":math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the task dynamics are: .. math::", "error vector, both expressed in the body frame. \"\"\" if self.target_q is None:", "matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n` the dimension of the", "typically used for regularization as it has a steady rank. For instance, when", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", ":math:`q` of the robot. See :func:`Task.compute_task_dynamics` for more context. Args: configuration: Robot configuration", "class PostureTask(Task): \"\"\" Regulate joint angles to a desired posture, *i.e.* a vector", "Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. target_q: Target vector", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "and vector :math:`\\\\alpha e(q)` such that the task dynamics are: .. math:: J(q)", "tangent space, and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on", "Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that", "same unit. The unit of the overall contribution is :math:`[\\\\mathrm{cost}]^2`. The configuration displacement", "to get a commanded velocity). Args: robot: Robot model and configuration. Returns: Pair", "\"\"\" Compute the matrix :math:`J(q)` and vector :math:`\\\\alpha e(q)` such that the task", "is the output of inverse kinematics (we divide it by :math:`\\\\Delta t` to", "task to the QP objective is: .. math:: \\\\| J \\\\Delta q -", "a desired posture, *i.e.* a vector of actuated joint angles. Floating base coordinates", "typing import Optional, Tuple import numpy as np from ..configuration import Configuration from", "\\\\alpha e(q) The Jacobian matrix is :math:`J(q) \\\\in \\\\mathbb{R}^{n \\\\times n}`, with :math:`n`", "the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian", "weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable", "base. \"\"\" self.cost = cost self.target_q = None def set_target(self, target_q: np.ndarray) ->", "task. Args: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`. Note: We", "\\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates to the same unit.", "error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the configuration :math:`q` of", "from ..configuration import Configuration from .exceptions import TargetNotSet from .task import Task class", "def __init__(self, cost: float) -> None: \"\"\" Create task. Args: cost: joint angular", "\"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float) -> None: \"\"\" Create", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "by this task. Attributes: cost: joint angular error cost in :math:`[\\\\mathrm{cost}] / [\\\\mathrm{rad}]`.", "dynamics are: .. math:: J(q) \\\\Delta q = \\\\alpha e(q) The Jacobian matrix", "of the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost *", "coordinates of the configuration are for the floating base. \"\"\" self.cost = cost", "actuated joint angles. Floating base coordinates are not affected by this task. 
Attributes:", "\"\"\" self.cost = cost self.target_q = None def set_target(self, target_q: np.ndarray) -> None:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "angles. Floating base coordinates are not affected by this task. Attributes: cost: joint", "space, and the error vector is :math:`e(q) \\\\in \\\\mathbb{R}^n`. Both depend on the", "PostureTask(Task): \"\"\" Regulate joint angles to a desired posture, *i.e.* a vector of", "the configuration are for the floating base. \"\"\" self.cost = cost self.target_q =", "H = weighted_jacobian.T @ weighted_jacobian c = -weighted_error.T @ weighted_jacobian return (H, c)", "posture, *i.e.* a vector of actuated joint angles. Floating base coordinates are not", "and # limitations under the License. \"\"\" Posture task specification. \"\"\" from typing", "model and configuration. Returns: Pair :math:`(H, c)` of Hessian matrix and linear vector", "steady rank. For instance, when Upkie's legs are stretched and the Jacobian of", "\\\\times n}` weighs and normalizes task coordinates to the same unit. The unit", "legs are stretched and the Jacobian of its contact frames become singular, the", "QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian = self.cost * jacobian #", "both expressed in the body frame. \"\"\" if self.target_q is None: raise TargetNotSet(\"no", "Target vector in the configuration space. \"\"\" self.target_q = target_q.copy() def compute_task_dynamics( self,", "\\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2} \\\\Delta q^T H \\\\Delta q", "for more context. Args: configuration: Robot configuration to read kinematics from. Returns: Pair", "Returns: Pair :math:`(J, \\\\alpha e)` of Jacobian matrix and error vector, both expressed", "set_target(self, target_q: np.ndarray) -> None: \"\"\" Set task target pose in the world", "Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and linear", "weighted_jacobian return (H, c) def __repr__(self): \"\"\" Human-readable representation of the task. \"\"\"", ":math:`H` and linear vector :math:`c` such that the contribution of the task to", "configuration displacement :math:`\\\\Delta q` is the output of inverse kinematics (we divide it", "weighs and normalizes task coordinates to the same unit. The unit of the", "np.ndarray]: \"\"\" Compute the Hessian matrix :math:`H` and linear vector :math:`c` such that", "The weight matrix :math:`W \\\\in \\\\mathbb{R}^{n \\\\times n}` weighs and normalizes task coordinates", "contribution of the task to the QP objective is: .. math:: \\\\| J", "drive the knees toward a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def", "expressed in the body frame. \"\"\" if self.target_q is None: raise TargetNotSet(\"no posture", "to a desired posture, *i.e.* a vector of actuated joint angles. Floating base", "\"\"\" Set task target pose in the world frame. Args: target_q: Target vector", "the world frame. Args: target_q: Target vector in the configuration space. \"\"\" self.target_q", "the task dynamics are: .. math:: J(q) \\\\Delta q = \\\\alpha e(q) The", "frame. Args: target_q: Target vector in the configuration space. \"\"\" self.target_q = target_q.copy()", "c)` of Hessian matrix and linear vector of the QP objective. 
\"\"\" jacobian,", "vector :math:`c` such that the contribution of the task to the QP objective", "world frame. Args: target_q: Target vector in the configuration space. \"\"\" self.target_q =", "def compute_qp_objective( self, configuration: Configuration ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Compute the Hessian", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "\\\\alpha e)` of Jacobian matrix and error vector, both expressed in the body", "toward a preferred orientation. \"\"\" cost: float target_q: Optional[np.ndarray] def __init__(self, cost: float)", "posture task will drive the knees toward a preferred orientation. \"\"\" cost: float", "is: .. math:: \\\\| J \\\\Delta q - \\\\alpha e \\\\|_{W}^2 = \\\\frac{1}{2}", "and linear vector of the QP objective. \"\"\" jacobian, error = self.compute_task_dynamics(configuration) weighted_jacobian" ]
[ "= struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5 = struct.pack(\"BBB\", 0, g,", "j in range(8): cur |= (byte1 & (2**j)) << (j * 3 +", "wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5 = struct.pack(\"BBB\",", "& (2**j)) << (j * 7 + 0) cur |= (byte2 & (2**j))", "<< (j + 0) cur |= (byte2 & (2**j)) << (j + 1)", "math.sin(2. * math.pi * (5. * -x + n / 3.) / 100.)", "ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i])", "(2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving", "(j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\", r,", "(2**j)) << (j * 7 + 3) cur |= (byte5 & (2**j)) <<", "= open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x in range(PIXELS): # This", "interleaved = 1 # interleaved = 2 # interleaved = 4 interleaved =", "0, 0, b) # intentionally wrong for i in range(len(data1)): cur = 0", "wrong data6 = struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7 = struct.pack(\"BBB\",", "& (2**j)) << (j * 7 + 2) cur |= (byte4 & (2**j))", "intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6 =", "we get a half \"rainbow\", easy to find breaks/seams hue = float(x +", "= ord(data7[i]) byte8 = ord(data8[i]) for j in range(8): cur |= (byte1 &", "0, 0, b) # intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b) #", "struct.pack(\"BBB\", 0, g, b) # intentionally wrong for i in range(len(data1)): cur =", "|= (byte2 & (2**j)) << (j * 7 + 1) cur |= (byte3", "range(8): cur |= (byte1 & (2**j)) << (j * 7 + 0) cur", "= ord(data8[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j", "(j * 7 + 4) cur |= (byte6 & (2**j)) << (j *", "(byte6 & (2**j)) << (j * 7 + 5) cur |= (byte7 &", "import struct import math PIXELS = 94 # interleaved = 1 # interleaved", "cur |= (byte8 & (2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\", cur))", "3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\", r, g, b)", "= 1 # interleaved = 2 # interleaved = 4 interleaved = 8", "n in range(1000): for x in range(PIXELS): # This way we get a", "(2**j)) << (j + 0) cur |= (byte2 & (2**j)) << (j +", "easy to find breaks/seams hue = float(x + n/10.) / PIXELS / 2", "intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong for i", "ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j in", "|= (byte4 & (2**j)) << (j * 7 + 3) cur |= (byte5", "2 # interleaved = 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for", "0, b) # intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b) # intentionally", "* 7 + 6) cur |= (byte8 & (2**j)) << (j * 7", "<< (j * 3 + 0) cur |= (byte2 & (2**j)) << (j", "(j * 3 + 2) cur |= (byte4 & (2**j)) << (j *", "(j * 7 + 3) cur |= (byte5 & (2**j)) << (j *", "-x + n / 3.) / 100.) ) if interleaved == 2: data1", "& (2**j)) << (j * 7 + 3) cur |= (byte5 & (2**j))", "(byte2 & (2**j)) << (j * 7 + 1) cur |= (byte3 &", "& (2**j)) << (j + 0) cur |= (byte2 & (2**j)) << (j", "16 * math.sin(2. * math.pi * (5. 
* -x + n / 3.)", "5) cur |= (byte7 & (2**j)) << (j * 7 + 6) cur", "for j in range(8): cur |= (byte1 & (2**j)) << (j * 3", "(byte8 & (2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else: #", "byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j in range(8):", "ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i])", "|= (byte4 & (2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif", "interleaved == 8: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r,", "to find breaks/seams hue = float(x + n/10.) / PIXELS / 2 r,", "= ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 =", "b) # intentionally wrong for i in range(len(data1)): cur = 0 byte1 =", "(2**j)) << (j * 3 + 0) cur |= (byte2 & (2**j)) <<", "2 r, g, b = colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2.", "cur |= (byte3 & (2**j)) << (j * 3 + 2) cur |=", "struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for i in range(len(data1)): cur =", "1 # interleaved = 2 # interleaved = 4 interleaved = 8 f", "(byte4 & (2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved", "math.pi * (5. * -x + n / 3.) / 100.) ) if", "4) cur |= (byte6 & (2**j)) << (j * 7 + 5) cur", "0) cur |= (byte2 & (2**j)) << (j * 7 + 1) cur", "data7 = struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8 = struct.pack(\"BBB\", 0,", "<< (j * 7 + 3) cur |= (byte5 & (2**j)) << (j", "<< (j * 3 + 2) cur |= (byte4 & (2**j)) << (j", "(byte7 & (2**j)) << (j * 7 + 6) cur |= (byte8 &", "= 94 # interleaved = 1 # interleaved = 2 # interleaved =", "94 # interleaved = 1 # interleaved = 2 # interleaved = 4", "|= (byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved ==", "colorsys import struct import math PIXELS = 94 # interleaved = 1 #", "3.) / 100.) ) if interleaved == 2: data1 = struct.pack(\"BBB\", r, g,", "== 2: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0,", "3 + 2) cur |= (byte4 & (2**j)) << (j * 3 +", "* 7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving f.write(struct.pack(\"BBB\", r, g,", "r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for", "0, 0) # intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally", "ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j in range(8): cur |=", "& (2**j)) << (j * 3 + 0) cur |= (byte2 & (2**j))", "data6 = struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7 = struct.pack(\"BBB\", r,", "= struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7 = struct.pack(\"BBB\", r, 0,", "intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5 =", "colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2. * math.pi * (5. *", "byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for", "& (2**j)) << (j * 7 + 4) cur |= (byte6 & (2**j))", "0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i])", "interleaved = 2 # interleaved = 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved),", "8: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0)", "ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i])", "= float(x + n/10.) 
/ PIXELS / 2 r, g, b = colorsys.hsv_to_rgb(", "b) # intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong", "for x in range(PIXELS): # This way we get a half \"rainbow\", easy", "8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x in range(PIXELS):", "in range(8): cur |= (byte1 & (2**j)) << (j * 3 + 0)", "& (2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved ==", "= struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8 = struct.pack(\"BBB\", 0, g,", "struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0)", "ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j in", "= 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j in range(8): cur", "2) cur |= (byte4 & (2**j)) << (j * 7 + 3) cur", "g, b) # intentionally wrong for i in range(len(data1)): cur = 0 byte1", "byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5", "= struct.pack(\"BBB\", 0, g, b) # intentionally wrong for i in range(len(data1)): cur", "+ 3) cur |= (byte5 & (2**j)) << (j * 7 + 4)", "data5 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6 = struct.pack(\"BBB\", r,", "b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for i in", "in range(PIXELS): # This way we get a half \"rainbow\", easy to find", "cur |= (byte1 & (2**j)) << (j * 3 + 0) cur |=", "# intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6", "cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j in range(8):", "(2**j)) << (j * 7 + 4) cur |= (byte6 & (2**j)) <<", "<< (j * 7 + 6) cur |= (byte8 & (2**j)) << (j", "# intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for", "+ 4) cur |= (byte6 & (2**j)) << (j * 7 + 5)", "+ 1) cur |= (byte3 & (2**j)) << (j * 7 + 2)", "in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j", "/ PIXELS / 2 r, g, b = colorsys.hsv_to_rgb( hue, 1, 16 +", "elif interleaved == 4: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\",", "<< (j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving f.write(struct.pack(\"BBB\",", "3) cur |= (byte5 & (2**j)) << (j * 7 + 4) cur", "7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving f.write(struct.pack(\"BBB\", r, g, b))", "hue = float(x + n/10.) / PIXELS / 2 r, g, b =", "= ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 =", "* 7 + 5) cur |= (byte7 & (2**j)) << (j * 7", "* 7 + 0) cur |= (byte2 & (2**j)) << (j * 7", "for j in range(8): cur |= (byte1 & (2**j)) << (j * 7", "# intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7", "cur |= (byte3 & (2**j)) << (j * 7 + 2) cur |=", "# interleaved = 1 # interleaved = 2 # interleaved = 4 interleaved", "* 7 + 3) cur |= (byte5 & (2**j)) << (j * 7", "<< (j * 7 + 5) cur |= (byte7 & (2**j)) << (j", "/ 2 r, g, b = colorsys.hsv_to_rgb( hue, 1, 16 + 16 *", "breaks/seams hue = float(x + n/10.) 
/ PIXELS / 2 r, g, b", "intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for i", "wrong data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4 = struct.pack(\"BBB\",", "& (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1", "= ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j in range(8): cur", "= ord(data4[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j", "hue, 1, 16 + 16 * math.sin(2. * math.pi * (5. * -x", "= struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6 = struct.pack(\"BBB\", r, g,", "g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for i", "3 + 0) cur |= (byte2 & (2**j)) << (j * 3 +", "|= (byte7 & (2**j)) << (j * 7 + 6) cur |= (byte8", "struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b)", "+ n/10.) / PIXELS / 2 r, g, b = colorsys.hsv_to_rgb( hue, 1,", "+ 1) cur |= (byte3 & (2**j)) << (j * 3 + 2)", "byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6", "+ 5) cur |= (byte7 & (2**j)) << (j * 7 + 6)", "cur |= (byte1 & (2**j)) << (j * 7 + 0) cur |=", "byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j in range(8): cur |= (byte1", "6) cur |= (byte8 & (2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\",", "ord(data8[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j *", "wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for i in", "|= (byte1 & (2**j)) << (j + 0) cur |= (byte2 & (2**j))", "1) cur |= (byte3 & (2**j)) << (j * 7 + 2) cur", "+ 0) cur |= (byte2 & (2**j)) << (j * 3 + 1)", "0, b) # intentionally wrong for i in range(len(data1)): cur = 0 byte1", "& (2**j)) << (j * 3 + 2) cur |= (byte4 & (2**j))", "2: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0)", "|= (byte1 & (2**j)) << (j * 3 + 0) cur |= (byte2", "ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j in range(8): cur |=", "for n in range(1000): for x in range(PIXELS): # This way we get", "= colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2. 
* math.pi * (5.", "0) cur |= (byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif", "# interleaved = 2 # interleaved = 4 interleaved = 8 f =", "cur |= (byte1 & (2**j)) << (j + 0) cur |= (byte2 &", "|= (byte8 & (2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else:", "= ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j", "4: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0)", "0) # intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b) # intentionally wrong", "elif interleaved == 8: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\",", "0, g, b) # intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0) #", "data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for i in range(len(data1)):", "|= (byte2 & (2**j)) << (j * 3 + 1) cur |= (byte3", "PIXELS / 2 r, g, b = colorsys.hsv_to_rgb( hue, 1, 16 + 16", "b) # intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong", "3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\", r,", "data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0) #", "\"wb\") for n in range(1000): for x in range(PIXELS): # This way we", "g, 0) # intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally", "0, b) # intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b) # intentionally", "r, 0, 0) # intentionally wrong for i in range(len(data1)): cur = 0", "if interleaved == 2: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\",", "(5. * -x + n / 3.) / 100.) ) if interleaved ==", "g, b = colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2. * math.pi", "(byte2 & (2**j)) << (j * 3 + 1) cur |= (byte3 &", "n/10.) / PIXELS / 2 r, g, b = colorsys.hsv_to_rgb( hue, 1, 16", "* 7 + 1) cur |= (byte3 & (2**j)) << (j * 7", "j in range(8): cur |= (byte1 & (2**j)) << (j * 7 +", "byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8", "+ 2) cur |= (byte4 & (2**j)) << (j * 7 + 3)", "0, 0) # intentionally wrong for i in range(len(data1)): cur = 0 byte1", "1) cur |= (byte3 & (2**j)) << (j * 3 + 2) cur", "ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i])", "+ n / 3.) / 100.) ) if interleaved == 2: data1 =", "wrong data5 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6 = struct.pack(\"BBB\",", "# This way we get a half \"rainbow\", easy to find breaks/seams hue", "intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0) # intentionally wrong data7 =", "1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\", r, g, b)", "1, 16 + 16 * math.sin(2. * math.pi * (5. 
* -x +", "<< (j * 7 + 1) cur |= (byte3 & (2**j)) << (j", "byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 = ord(data6[i]) byte7", "interleaved = 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in", "# intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5", "for j in range(8): cur |= (byte1 & (2**j)) << (j + 0)", "|= (byte3 & (2**j)) << (j * 3 + 2) cur |= (byte4", "+ 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\", r, g,", "(byte1 & (2**j)) << (j + 0) cur |= (byte2 & (2**j)) <<", "import math PIXELS = 94 # interleaved = 1 # interleaved = 2", ") if interleaved == 2: data1 = struct.pack(\"BBB\", r, g, b) data2 =", "= ord(data2[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j", "byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) for", "j in range(8): cur |= (byte1 & (2**j)) << (j + 0) cur", "0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j in range(8): cur |=", "import colorsys import struct import math PIXELS = 94 # interleaved = 1", "wrong data8 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong for i in", "# intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8", "ord(data2[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j +", "byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j in range(8):", "float(x + n/10.) / PIXELS / 2 r, g, b = colorsys.hsv_to_rgb( hue,", "cur |= (byte5 & (2**j)) << (j * 7 + 4) cur |=", "wrong for i in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 =", "struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b)", "byte8 = ord(data8[i]) for j in range(8): cur |= (byte1 & (2**j)) <<", "0, g, 0) # intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) #", "16 + 16 * math.sin(2. * math.pi * (5. * -x + n", "# intentionally wrong for i in range(len(data1)): cur = 0 byte1 = ord(data1[i])", "(byte3 & (2**j)) << (j * 3 + 2) cur |= (byte4 &", "* 3 + 1) cur |= (byte3 & (2**j)) << (j * 3", "<< (j * 3 + 1) cur |= (byte3 & (2**j)) << (j", "This way we get a half \"rainbow\", easy to find breaks/seams hue =", "100.) ) if interleaved == 2: data1 = struct.pack(\"BBB\", r, g, b) data2", "(2**j)) << (j * 7 + 1) cur |= (byte3 & (2**j)) <<", "struct import math PIXELS = 94 # interleaved = 1 # interleaved =", "range(PIXELS): # This way we get a half \"rainbow\", easy to find breaks/seams", "* math.sin(2. * math.pi * (5. * -x + n / 3.) 
/", "(j * 3 + 0) cur |= (byte2 & (2**j)) << (j *", "0) # intentionally wrong for i in range(len(data1)): cur = 0 byte1 =", "= 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000):", "range(1000): for x in range(PIXELS): # This way we get a half \"rainbow\",", "= struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for i in range(len(data1)): cur", "(byte3 & (2**j)) << (j * 7 + 2) cur |= (byte4 &", "i in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for", "7 + 2) cur |= (byte4 & (2**j)) << (j * 7 +", "f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x in range(PIXELS): #", "interleaved == 2: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r,", "r, g, 0) # intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b) #", "(byte4 & (2**j)) << (j * 7 + 3) cur |= (byte5 &", "byte4 = ord(data4[i]) for j in range(8): cur |= (byte1 & (2**j)) <<", "= struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3 = struct.pack(\"BBB\", 0, g,", "(j * 7 + 6) cur |= (byte8 & (2**j)) << (j *", "= ord(data3[i]) byte4 = ord(data4[i]) for j in range(8): cur |= (byte1 &", "g, b) # intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0) # intentionally", "(2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8:", "/ 3.) / 100.) ) if interleaved == 2: data1 = struct.pack(\"BBB\", r,", "|= (byte1 & (2**j)) << (j * 7 + 0) cur |= (byte2", "(j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving f.write(struct.pack(\"BBB\", r,", "struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong", "(byte1 & (2**j)) << (j * 3 + 0) cur |= (byte2 &", "r, 0, b) # intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b) #", "* 7 + 2) cur |= (byte4 & (2**j)) << (j * 7", "= ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 =", "4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for", "byte2 = ord(data2[i]) for j in range(8): cur |= (byte1 & (2**j)) <<", "(2**j)) << (j * 3 + 2) cur |= (byte4 & (2**j)) <<", "byte3 = ord(data3[i]) byte4 = ord(data4[i]) for j in range(8): cur |= (byte1", "get a half \"rainbow\", easy to find breaks/seams hue = float(x + n/10.)", "find breaks/seams hue = float(x + n/10.) / PIXELS / 2 r, g,", "g, 0) # intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b) # intentionally", "cur |= (byte4 & (2**j)) << (j * 7 + 3) cur |=", "+ 16 * math.sin(2. * math.pi * (5. 
* -x + n /", "b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3 = struct.pack(\"BBB\",", "* 3 + 0) cur |= (byte2 & (2**j)) << (j * 3", "= struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4 = struct.pack(\"BBB\", 0, 0,", "0) # intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally wrong", "= ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j in range(8): cur", "(2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 =", "(2**j)) << (j * 7 + 2) cur |= (byte4 & (2**j)) <<", "(j * 3 + 1) cur |= (byte3 & (2**j)) << (j *", "& (2**j)) << (j * 7 + 1) cur |= (byte3 & (2**j))", "7 + 6) cur |= (byte8 & (2**j)) << (j * 7 +", "cur |= (byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved", "cur |= (byte4 & (2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\", cur))", "in range(1000): for x in range(PIXELS): # This way we get a half", "b) # intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0) # intentionally wrong", "= struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally", "& (2**j)) << (j * 7 + 5) cur |= (byte7 & (2**j))", "& (2**j)) << (j * 3 + 1) cur |= (byte3 & (2**j))", "+ 6) cur |= (byte8 & (2**j)) << (j * 7 + 7)", "0) cur |= (byte2 & (2**j)) << (j * 3 + 1) cur", "|= (byte6 & (2**j)) << (j * 7 + 5) cur |= (byte7", "|= (byte3 & (2**j)) << (j * 7 + 2) cur |= (byte4", "cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4", "* -x + n / 3.) / 100.) ) if interleaved == 2:", "= 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i]) byte4 =", "a half \"rainbow\", easy to find breaks/seams hue = float(x + n/10.) /", "i in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3", "(2**j)) << (j * 7 + 5) cur |= (byte7 & (2**j)) <<", "struct.pack(\"BBB\", 0, 0, b) # intentionally wrong for i in range(len(data1)): cur =", "<< (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\",", "(j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 =", "interleaved == 4: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r,", "/ 100.) ) if interleaved == 2: data1 = struct.pack(\"BBB\", r, g, b)", "7 + 1) cur |= (byte3 & (2**j)) << (j * 7 +", "+ 0) cur |= (byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur))", "\"rainbow\", easy to find breaks/seams hue = float(x + n/10.) 
/ PIXELS /", "g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3 =", "way we get a half \"rainbow\", easy to find breaks/seams hue = float(x", "data8 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong for i in range(len(data1)):", "cur |= (byte2 & (2**j)) << (j * 7 + 1) cur |=", "data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for i in range(len(data1)):", "in range(8): cur |= (byte1 & (2**j)) << (j + 0) cur |=", "range(8): cur |= (byte1 & (2**j)) << (j * 3 + 0) cur", "== 8: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0,", "ord(data7[i]) byte8 = ord(data8[i]) for j in range(8): cur |= (byte1 & (2**j))", "(byte5 & (2**j)) << (j * 7 + 4) cur |= (byte6 &", "f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\", r, g, b) data2", "(j * 7 + 1) cur |= (byte3 & (2**j)) << (j *", "3 + 1) cur |= (byte3 & (2**j)) << (j * 3 +", "in range(8): cur |= (byte1 & (2**j)) << (j * 7 + 0)", "ord(data4[i]) for j in range(8): cur |= (byte1 & (2**j)) << (j *", "b = colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2. * math.pi *", "<< (j * 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1", "7 + 3) cur |= (byte5 & (2**j)) << (j * 7 +", "interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x", "half \"rainbow\", easy to find breaks/seams hue = float(x + n/10.) / PIXELS", "cur |= (byte2 & (2**j)) << (j * 3 + 1) cur |=", "struct.pack(\"BBB\", 0, g, b) # intentionally wrong data6 = struct.pack(\"BBB\", r, g, 0)", "for i in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i])", "|= (byte5 & (2**j)) << (j * 7 + 4) cur |= (byte6", "+ 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\", r, g,", "byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j in range(8): cur |= (byte1", "(byte1 & (2**j)) << (j * 7 + 0) cur |= (byte2 &", "data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4 = struct.pack(\"BBB\", 0,", "# intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b) # intentionally wrong for", "(j * 7 + 5) cur |= (byte7 & (2**j)) << (j *", "r, g, b) data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3", "= struct.pack(\"BBB\", r, 0, 0) # intentionally wrong for i in range(len(data1)): cur", "x in range(PIXELS): # This way we get a half \"rainbow\", easy to", "(byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\", cur)) elif interleaved == 4:", "& (2**j)) << (j * 7 + 6) cur |= (byte8 & (2**j))", "cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\", r, g, b) data2 =", "= 2 # interleaved = 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\")", "data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5 = struct.pack(\"BBB\", 0,", "intentionally wrong for i in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2", "(2**j)) << (j * 3 + 1) cur |= (byte3 & (2**j)) <<", "range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 = ord(data3[i])", "2) cur |= (byte4 & (2**j)) << (j * 3 + 3) f.write(struct.pack(\">L\",", "ord(data1[i]) byte2 = ord(data2[i]) for j in range(8): cur |= (byte1 & (2**j))", "= ord(data5[i]) byte6 = ord(data6[i]) byte7 = ord(data7[i]) byte8 = ord(data8[i]) for j", "n / 3.) / 100.) 
) if interleaved == 2: data1 = struct.pack(\"BBB\",", "PIXELS = 94 # interleaved = 1 # interleaved = 2 # interleaved", "0, g, b) # intentionally wrong for i in range(len(data1)): cur = 0", "# intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4", "r, 0, 0) # intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0) #", "(2**j)) << (j * 7 + 0) cur |= (byte2 & (2**j)) <<", "intentionally wrong data7 = struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8 =", "cur |= (byte7 & (2**j)) << (j * 7 + 6) cur |=", "7 + 4) cur |= (byte6 & (2**j)) << (j * 7 +", "0) # intentionally wrong data4 = struct.pack(\"BBB\", 0, 0, b) # intentionally wrong", "in range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) byte3 =", "+ 7) f.write(struct.pack(\">Q\", cur)) else: # No interleaving f.write(struct.pack(\"BBB\", r, g, b)) f.close()", "struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8 = struct.pack(\"BBB\", 0, g, b)", "(j * 7 + 0) cur |= (byte2 & (2**j)) << (j *", "<< (j * 7 + 4) cur |= (byte6 & (2**j)) << (j", "+ 2) cur |= (byte4 & (2**j)) << (j * 3 + 3)", "cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\", r, g, b) data2 =", "= 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x in", "<< (j * 7 + 2) cur |= (byte4 & (2**j)) << (j", "* (5. * -x + n / 3.) / 100.) ) if interleaved", "# interleaved = 4 interleaved = 8 f = open(\"test_{}.bin\".format(interleaved), \"wb\") for n", "7 + 0) cur |= (byte2 & (2**j)) << (j * 7 +", "ord(data3[i]) byte4 = ord(data4[i]) for j in range(8): cur |= (byte1 & (2**j))", "(j + 0) cur |= (byte2 & (2**j)) << (j + 1) f.write(struct.pack(\">H\",", "(2**j)) << (j * 7 + 6) cur |= (byte8 & (2**j)) <<", "struct.pack(\"BBB\", 0, 0, b) # intentionally wrong data5 = struct.pack(\"BBB\", 0, g, b)", "* math.pi * (5. * -x + n / 3.) / 100.) )", "= ord(data1[i]) byte2 = ord(data2[i]) for j in range(8): cur |= (byte1 &", "<< (j * 7 + 0) cur |= (byte2 & (2**j)) << (j", "== 4: data1 = struct.pack(\"BBB\", r, g, b) data2 = struct.pack(\"BBB\", r, 0,", "* 7 + 4) cur |= (byte6 & (2**j)) << (j * 7", "* 3 + 2) cur |= (byte4 & (2**j)) << (j * 3", "7 + 5) cur |= (byte7 & (2**j)) << (j * 7 +", "intentionally wrong data3 = struct.pack(\"BBB\", 0, g, 0) # intentionally wrong data4 =", "cur |= (byte6 & (2**j)) << (j * 7 + 5) cur |=", "range(8): cur |= (byte1 & (2**j)) << (j + 0) cur |= (byte2", "& (2**j)) << (j * 7 + 7) f.write(struct.pack(\">Q\", cur)) else: # No", "math PIXELS = 94 # interleaved = 1 # interleaved = 2 #", "* 3 + 3) f.write(struct.pack(\">L\", cur)) elif interleaved == 8: data1 = struct.pack(\"BBB\",", "= ord(data2[i]) byte3 = ord(data3[i]) byte4 = ord(data4[i]) byte5 = ord(data5[i]) byte6 =", "range(len(data1)): cur = 0 byte1 = ord(data1[i]) byte2 = ord(data2[i]) for j in", "f.write(struct.pack(\">H\", cur)) elif interleaved == 4: data1 = struct.pack(\"BBB\", r, g, b) data2", "+ 0) cur |= (byte2 & (2**j)) << (j * 7 + 1)", "r, g, b = colorsys.hsv_to_rgb( hue, 1, 16 + 16 * math.sin(2. *", "data2 = struct.pack(\"BBB\", r, 0, 0) # intentionally wrong data3 = struct.pack(\"BBB\", 0,", "(j * 7 + 2) cur |= (byte4 & (2**j)) << (j *", "open(\"test_{}.bin\".format(interleaved), \"wb\") for n in range(1000): for x in range(PIXELS): # This way", "wrong data7 = struct.pack(\"BBB\", r, 0, b) # intentionally wrong data8 = struct.pack(\"BBB\"," ]
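Because each output word places bit j of stream k at position (streams * j + k), the streams can be separated again with the reverse shifts. Below is a minimal sketch of decoding the 2-stream case; it assumes a file written with interleaved = 2, and the function name and Python 3 byte handling are my own, not part of the original script.

import struct

def deinterleave2(path):
    # Split a stream of big-endian 16-bit words back into the two original byte streams.
    stream1, stream2 = bytearray(), bytearray()
    with open(path, "rb") as f:
        while True:
            word = f.read(2)
            if len(word) < 2:
                break
            (cur,) = struct.unpack(">H", word)
            byte1 = byte2 = 0
            for j in range(8):
                byte1 |= ((cur >> (2 * j)) & 1) << j       # even bit positions -> stream 1
                byte2 |= ((cur >> (2 * j + 1)) & 1) << j   # odd bit positions  -> stream 2
            stream1.append(byte1)
            stream2.append(byte2)
    return bytes(stream1), bytes(stream2)

# Example (assuming the generator above was run with interleaved = 2):
# s1, s2 = deinterleave2("test_2.bin")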
[ "library imports from http import HTTPStatus # Pip package imports from flask import", "if email.is_active: email.is_active = False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return", "#return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str)", "Python library imports from http import HTTPStatus # Pip package imports from flask", "not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are successfully", "redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email", "..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str =", "if email_str is None: if not request.is_json: # Return redirect view #return redirect(get_url())", "jsonify({ 'email': email, 'status': 'You are successfully unsubscribed from our mailing list.', })", "if the user is still active if email.is_active: email.is_active = False email.save(commit=True) if", "Commit only if the user is still active if email.is_active: email.is_active = False", "..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET'])", "jsonify # Internal package imports from backend.utils import decode_token from ..models import NewsletterSubscribe", "# Pip package imports from flask import render_template, request, jsonify # Internal package", "Internal package imports from backend.utils import decode_token from ..models import NewsletterSubscribe from ..utils", "backend.utils import decode_token from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint", "token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the user", "coding: utf-8 -*- # Common Python library imports from http import HTTPStatus #", "email_str = decode_token(token) if email_str is None: if not request.is_json: # Return redirect", "imports from flask import render_template, request, jsonify # Internal package imports from backend.utils", "Pip package imports from flask import render_template, request, jsonify # Internal package imports", "request.is_json: # Return redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}),", "False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status':", "decode_token(token) if email_str is None: if not request.is_json: # Return redirect view #return", ".blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str is", "not request.is_json: # Return redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token", "user is still active if email.is_active: email.is_active = False 
email.save(commit=True) if not request.is_json:", "Return redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else:", "from backend.utils import decode_token from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from", "only if the user is still active if email.is_active: email.is_active = False email.save(commit=True)", "return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are successfully unsubscribed from", "utf-8 -*- # Common Python library imports from http import HTTPStatus # Pip", "Common Python library imports from http import HTTPStatus # Pip package imports from", "resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are successfully unsubscribed from our mailing", "request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are successfully unsubscribed", "email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You", "render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are successfully unsubscribed from our", "# Return redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND", "import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str is None:", "from flask import render_template, request, jsonify # Internal package imports from backend.utils import", "else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is still active", "@newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str is None: if not", "redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) #", "from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>',", "import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def", "package imports from flask import render_template, request, jsonify # Internal package imports from", "the user is still active if email.is_active: email.is_active = False email.save(commit=True) if not", "from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str", "'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the", "generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def 
unsubscribe(token): email_str = decode_token(token) if", "still active if email.is_active: email.is_active = False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html',", "render_template, request, jsonify # Internal package imports from backend.utils import decode_token from ..models", "package imports from backend.utils import decode_token from ..models import NewsletterSubscribe from ..utils import", "given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is", "return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit", "is still active if email.is_active: email.is_active = False email.save(commit=True) if not request.is_json: return", "-*- coding: utf-8 -*- # Common Python library imports from http import HTTPStatus", "http import HTTPStatus # Pip package imports from flask import render_template, request, jsonify", "import decode_token from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import", "= NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is still active if email.is_active:", "HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is still", "decode_token from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe", "newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str is None: if", "#!/usr/bin/env python # -*- coding: utf-8 -*- # Common Python library imports from", "None: if not request.is_json: # Return redirect view #return redirect(get_url()) return return jsonify({'errors':", "if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email, 'status': 'You are", "imports from http import HTTPStatus # Pip package imports from flask import render_template,", "from http import HTTPStatus # Pip package imports from flask import render_template, request,", "imports from backend.utils import decode_token from ..models import NewsletterSubscribe from ..utils import generate_resubscribe_link", "import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str = decode_token(token)", "python # -*- coding: utf-8 -*- # Common Python library imports from http", "def unsubscribe(token): email_str = decode_token(token) if email_str is None: if not request.is_json: #", "active if email.is_active: email.is_active = False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email))", "= False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email': email,", "NewsletterSubscribe from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token):", 
"email.is_active: email.is_active = False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({", "# -*- coding: utf-8 -*- # Common Python library imports from http import", "from ..utils import generate_resubscribe_link from .blueprint import newsletter_subscribe @newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET']) def unsubscribe(token): email_str", "return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only", "import HTTPStatus # Pip package imports from flask import render_template, request, jsonify #", "email.is_active = False email.save(commit=True) if not request.is_json: return render_template('newsletter_subscribe/email/confirm_unsubscribe.html', resubscribe_link=generate_resubscribe_link(email.email)) return jsonify({ 'email':", "email_str is None: if not request.is_json: # Return redirect view #return redirect(get_url()) return", "return jsonify({ 'email': email, 'status': 'You are successfully unsubscribed from our mailing list.',", "= decode_token(token) if email_str is None: if not request.is_json: # Return redirect view", "# Common Python library imports from http import HTTPStatus # Pip package imports", "email = NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is still active if", "# Internal package imports from backend.utils import decode_token from ..models import NewsletterSubscribe from", "flask import render_template, request, jsonify # Internal package imports from backend.utils import decode_token", "is None: if not request.is_json: # Return redirect view #return redirect(get_url()) return return", "HTTPStatus # Pip package imports from flask import render_template, request, jsonify # Internal", "# Commit only if the user is still active if email.is_active: email.is_active =", "-*- # Common Python library imports from http import HTTPStatus # Pip package", "NewsletterSubscribe.get_by(email=email_str) # Commit only if the user is still active if email.is_active: email.is_active", "view #return redirect(get_url()) return return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email =", "import render_template, request, jsonify # Internal package imports from backend.utils import decode_token from", "unsubscribe(token): email_str = decode_token(token) if email_str is None: if not request.is_json: # Return", "if not request.is_json: # Return redirect view #return redirect(get_url()) return return jsonify({'errors': 'Invalid", "methods=['GET']) def unsubscribe(token): email_str = decode_token(token) if email_str is None: if not request.is_json:", "jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND else: email = NewsletterSubscribe.get_by(email=email_str) # Commit only if", "request, jsonify # Internal package imports from backend.utils import decode_token from ..models import" ]
[ "<reponame>GRV96/jazal from os import system system(\"pytest path_util_tests.py\") system(\"pytest path_checker_tests.py\") system(\"pytest reactive_path_checker_tests.py\") system(\"pytest missing_path_arg_warner_tests.py\")" ]
[ "!= 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim,", "self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]):", "max self.range = max - min self.min, self.max, self.x_range = None, None, None", "self.x_range = None, None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim,", "= [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range +", "= max - min self.min, self.max, self.x_range = None, None, None def fit(self,", "self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse", "+ self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax", "x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range) * self.range +", "X1 = X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max -", "self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim", "Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform' return r def", "self.range_max = min, max self.range = max - min self.min, self.max, self.x_range =", "Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim,", "x_feature_dim) assert self.min is not None and self.max is not None, '{} Must", "min, max self.range = max - min self.min, self.max, self.x_range = None, None,", "= X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range", "x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd =", "self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) /", "self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self,", "x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim", "r.attrs.keys() else '\\n XCAST MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None,", "self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim)", "x_sample_dim, x_feature_dim) #X1 = 
X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({", "- self.range_min) / self.range) * self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by']", "self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range != 0,", "r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys()", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None and self.max is not", "* class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max = min, max self.range", "x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 =", "range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] =", "MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform' return r", "self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform'", "guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is", "x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X,", "x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1", "x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None and self.max is not None,", "None and self.max is not None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now())", "None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim =", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim,", "is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X -", "self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert", "Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range) * self.range", "x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim,", "None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min) /", "= r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n", "self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim,", "self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by'", "in 
range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim]", "'\\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax", "x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None and self.max", "self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None and", "and self.max is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r =", "x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min =", "self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim,", "max - min self.min, self.max, self.x_range = None, None, None def fit(self, X,", "X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max", "'\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform'", "x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim:", "x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim,", "= X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min", "None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim", "= X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range != 0, other=1)", "transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim,", "not None and self.max is not None, '{} Must Fit MinMaxScaler before inverse", "0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim", "for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] =", "check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim})", "self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})", "def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X,", "__init__(self, min=-1, max=1): self.range_min, self.range_max = min, max self.range = max - min", "= [] for i in 
range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]", "self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if 'generated_by' in", "Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max", "self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim,", "sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]", "x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim)", "= X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})", "return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim", "x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim,", "#X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim,", "self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min) r", "r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else", "None, None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim,", "XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Inverse", "self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for", "self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST", "XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform' return", "= self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim,", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim,", "= self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range =", "r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST", "x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range", "not None, '{} Must Fit 
MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim,", "self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret", "X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def", "= x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max =", "not None and self.max is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now())", "max=1): self.range_min, self.range_max = min, max self.range = max - min self.min, self.max,", "= guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim,", "check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim,", "self.max is not None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min =", "min self.min, self.max, self.x_range = None, None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None,", "self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is", "= None, None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim,", "'{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range)", "in r.attrs.keys() else '\\n XCAST MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None,", "None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim,", "check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None and self.max is", "x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({", "+ self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by' in", "<reponame>kjhall01/xcast from ..core.utilities import * class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max", "X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range", "x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim,", "other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim =", "self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range", "before transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range) * self.range + self.range_min", "and self.max is not None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min", "x_lon_dim, x_sample_dim, x_feature_dim) 
check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim:", "XCAST MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim,", "x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None and", "self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim,", "[X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min) r = xr.concat(ret,", "self.range) * self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] +", "xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if 'generated_by'", "MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Inverse Transform'", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim", "self.max - self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None,", "self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim,", "((X - self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] +", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min", "'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform' return r def inverse_transform(self, X,", "= self.max - self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self, X,", "= {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd)", "i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min)", "= min, max self.range = max - min self.min, self.max, self.x_range = None,", "= self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim,", "..core.utilities import * class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max = min,", "self.range = max - min self.min, self.max, self.x_range = None, None, None def", "x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range =", "r = ((X - self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by'] =", "self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ 
self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim,", "x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim,", "x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim,", "ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] =", "from ..core.utilities import * class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max =", "x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not None", "r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim =", "MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max = min, max self.range = max", "Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max =", "before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim,", "guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim,", "self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret =", "self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range", "i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]", "transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})", "x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim,", "'\\n XCAST MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):", "def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X,", "self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max =", "= r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else", "MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, 
self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({", "x_feature_dim assert self.min is not None and self.max is not None, '{} Must", "import * class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max = min, max", "min=-1, max=1): self.range_min, self.range_max = min, max self.range = max - min self.min,", "self.range_min, self.range_max = min, max self.range = max - min self.min, self.max, self.x_range", "self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim =", "self.min is not None and self.max is not None, '{} Must Fit MinMaxScaler", "= guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min", "guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim:", "self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max", "not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min)", "- self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None,", "= ((X - self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by']", "x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1", "+ '\\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST", "self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = []", "ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min) r = xr.concat(ret, self.feature_dim)", "self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim,", "inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim,", "def __init__(self, min=-1, max=1): self.range_min, self.range_max = min, max self.range = max -", "x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim:", "[X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min)", "= [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min) r =", "self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST", "Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Inverse Transform' return", "None and self.max is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r", "x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim]", 
"self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):", "X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range = self.x_range.where(self.x_range !=", "is not None and self.max is not None, '{} Must Fit MinMaxScaler before", "self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i", "self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None", "x_lon_dim, x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i}", "self.min self.x_range = self.x_range.where(self.x_range != 0, other=1) def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None,", "self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})", "= x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None and self.max is", "def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X,", "{x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) -", "Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Inverse Transform' return r", "self.max, self.x_range = None, None, None def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):", "x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None and self.max is not", "self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min", "x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None and self.max is not None,", "self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not", "self.range_min) / self.range) * self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] =", "Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range) *", "= self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim,", "is not None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({", "x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert", "transform'.format(dt.datetime.now()) r = ((X - self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by']", "X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim,", "self.min, self.max, self.x_range = None, None, None def fit(self, X, x_lat_dim=None, 
x_lon_dim=None, x_sample_dim=None,", "self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i in", "/ self.range) * self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by']", "x_sample_dim, x_feature_dim) assert self.min is not None and self.max is not None, '{}", "self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max - self.min self.x_range =", "[] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd = {x_feature_dim: i} self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim]", "fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim,", "MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None): x_lat_dim, x_lon_dim,", "- min self.min, self.max, self.x_range = None, None, None def fit(self, X, x_lat_dim=None,", "x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim =", "* self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if", "x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel()", "= [X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range)", "self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min", "self.x_range = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim,", "'{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})", "= x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim ret = [] for i in range(X.shape[list(X.dims).index(self.feature_dim)]): sd", "* self.x_range + self.min) r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n", "[X.coords[self.feature_dim].values[i]] self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]] ret.append(((X.isel(**sd) - self.range_min) / self.range) *", "self.max is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now()) r = ((X", "x_lon_dim, x_feature_dim assert self.min is not None and self.max is not None, '{}", "x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim,", "if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax Transform' return r def inverse_transform(self,", "+ '\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\\n XCAST MinMax", "inverse transform'.format(dt.datetime.now()) self.min = self.min.rename({ self.lat_dim:x_lat_dim, 
self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim,", "- self.min) / self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n", "x_feature_dim X1 = X.isel() self.min = X1.min(x_sample_dim) self.max = X1.max(x_sample_dim) self.x_range = self.max", "/ self.x_range) * self.range + self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax", "r = xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform'", "self.range_min r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys()", "assert self.min is not None and self.max is not None, '{} Must Fit", "self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim}) self.x_range = self.max.rename({", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) assert self.min is not", "x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim", "r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\\n", "self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim X1 = X.isel() self.min =", "x_feature_dim) #X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim}) self.min = self.min.rename({ self.lat_dim:x_lat_dim,", "= xr.concat(ret, self.feature_dim) r.attrs['generated_by'] = r.attrs['generated_by'] + '\\n XCAST MinMax Inverse Transform' if", "class MinMax: def __init__(self, min=-1, max=1): self.range_min, self.range_max = min, max self.range =", "else '\\n XCAST MinMax Transform' return r def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None,", "self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim assert self.min is not None and self.max", "= guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #X1 =" ]
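The fragments in the array above appear to describe an xarray-based MinMax scaler (the strings reference an "XCAST MinMax Transform") with fit, transform and inverse_transform steps: the minimum and maximum are taken over the sample dimension, a zero range is replaced by 1 to avoid division by zero, and values are mapped into [range_min, range_max]. The sketch below reproduces only that scaling logic under stated assumptions; it is not the XCAST implementation, and the class name, dimension names and demo DataArray are illustrative.

# Minimal sketch of the min-max scaling logic suggested by the fragments above.
# Assumptions: input is an xarray.DataArray with a named sample dimension;
# names such as SimpleMinMax and "sample"/"feature" are illustrative only.
import numpy as np
import xarray as xr


class SimpleMinMax:
    """Scale each feature to [range_min, range_max] over a sample dimension."""

    def __init__(self, range_min=-1, range_max=1):
        self.range_min, self.range_max = range_min, range_max
        self.range = range_max - range_min
        self.min = self.max = self.x_range = None

    def fit(self, X, sample_dim="sample"):
        self.min = X.min(sample_dim)
        self.max = X.max(sample_dim)
        # Guard against constant features: a zero range would divide by zero.
        span = self.max - self.min
        self.x_range = span.where(span != 0, other=1)
        return self

    def transform(self, X):
        assert self.min is not None, "fit() must be called before transform()"
        return ((X - self.min) / self.x_range) * self.range + self.range_min

    def inverse_transform(self, X):
        return ((X - self.range_min) / self.range) * self.x_range + self.min


# Usage example on a small random DataArray.
data = xr.DataArray(np.random.rand(10, 3), dims=("sample", "feature"))
scaler = SimpleMinMax().fit(data, sample_dim="sample")
scaled = scaler.transform(data)                 # per-feature values in [-1, 1]
restored = scaler.inverse_transform(scaled)
assert np.allclose(data.values, restored.values)

Replacing a zero range with 1 keeps constant features finite after scaling; the trade-off is that such features cannot be exactly recovered by inverse_transform, which matches the guard seen in the fragments.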
[ "lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\",", "in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result", "not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software',", "# /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what),", "\"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx)", "in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces,", "'1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] ==", "namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] ==", "custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result =", "assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not", "os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert", "in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert", "time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def", "result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode == 0 result", "result = lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode ==", "import platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\")", "product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"):", "in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces,", "out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value in custom_values:", "'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value in custom_values: result = lldpcli(", "<filename>tests/integration/test_configinventory.py import os import pytest import platform import time import shlex 
@pytest.mark.skipif(\"'LLDP-MED' not", "assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert", "in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'),", "assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122'", "'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in", "sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if", "assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware'", "assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not", "0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what,", "inventory {}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0", "test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for", "('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ]", "namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure inventory {}", "value))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0 result", "= lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] ==", "('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model',", "custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial',", "= lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0", "'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with", "'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what,", "pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode ==", "'1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert 
out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\", "config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file):", "'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value in custom_values:", "= lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode == 0 result =", "time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx,", "lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode", "\"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert", "== 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] ==", "result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\")", "pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert", "== '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] ==", "= \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for what, pfx, value in", "in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for what,", "assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation'", "\"show\", \"neighbors\", \"details\") for what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert", "\"neighbors\", \"details\") for what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find]", "'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in", "@pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd,", "what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\")", "'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in", "] with namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure", "= lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value in custom_values: key_to_find", "out[key_to_find] == value with namespaces(2): 
for what, pfx, value in custom_values: result =", "in custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode ==", "in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert", "with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value", "what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode", "'487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert", "os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items():", "replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out =", "out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not", "assert out[key_to_find] == value with namespaces(2): for what, pfx, value in custom_values: result", "out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware']", "('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset',", "value in custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode", "reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with", "test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\")", "namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value in", "out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial']", "for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what,", "value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\",", "0 result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1): out =", "lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0 test_default_inventory(namespaces,", "\\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] ==", "out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 
'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli)", "\"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware']", "lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value", "TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): #", "in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert", "out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software'", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items()", "with namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory", "lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert", "pytest import platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [", "'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512'", "== 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert", "assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'),", "out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items()", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items()", "\"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\",", "in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces,", "'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in", "'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware'", "import time import 
shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object):", "assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values", "== '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items()", "'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model',", "in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert", "== 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for", "*shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode ==", "'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset',", "out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model'", "== '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset']", "\"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for what, pfx, value in custom_values:", "\"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10'", "out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert", "value with namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure", "assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not", "assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not", "import os import pytest import platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items()", "'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'),", "dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli):", "assert 
out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert", "'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer',", "for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert", "value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0", "custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for what, pfx,", "chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out", "supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if", "= lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\",", "key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for what, pfx, value", "custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode == 0", "'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2):", "with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\",", "value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def", "bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with", "'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values =", "what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value)))", "== 0 result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1): out", "inventory {} {}\".format(what, value))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode", "platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert", "replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\",", "out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' 
assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert", "not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2):", "namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\",", "out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer'", "out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer']", "for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\",", "'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877')", "shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1,", "lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what,", "platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class", "lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") for what, pfx, value in custom_values: key_to_find =", "lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14'", "= lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\") assert", "value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2): for", "result.returncode == 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\")", "assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular'", "import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self,", "== \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software']", "class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"):", "lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'),", 
"*shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode == 0 result = lldpcli(\"resume\") assert", "result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\",", "os import pytest import platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\",", "'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision',", "test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware',", "'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in", "'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx,", "assert 'lldp.eth0.lldp-med.inventory.model' not in out.items() assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not", "('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value", "else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial'", "lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in", "if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com'", "== '1.10' assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model']", "out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in", "== 0 result = lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert", "out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else:", "result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\")", "lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3)", "\\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items()", "not in config.lldpd.features\", reason=\"LLDP-MED not supported\") class TestConfigInventory(object): def test_configinventory(self, lldpd1, lldpd, lldpcli,", "assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not", "out.items() assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items() assert 'lldp.eth0.lldp-med.inventory.model' not in 
out.items() assert 'lldp.eth0.lldp-med.inventory.asset'", "('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value in custom_values: result", "def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/*", "[ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'),", "== \\ platform.release() else: assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in", "assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with", "product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value) lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1):", "\"details\") for what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] ==", "{} {}\".format(what, value))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode ==", "pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value with namespaces(2):", "def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\",", "== value with namespaces(2): for what, pfx, value in custom_values: result = lldpcli(", "{}\".format(what, value))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0", "out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision',", "'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value in", "assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\ platform.release()", "with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\", \"details\") assert out['lldp.eth0.chassis.name']", "/sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\", chassis_asset_tag=\"487122\").items(): replace_file(\"/sys/class/dmi/id/{}\".format(what), value)", "lldpcli( *shlex.split(\"configure inventory {} {}\".format(what, value))) assert result.returncode == 0 result = lldpcli(\"resume\")", "0 result = lldpcli(\"resume\") assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode", "for what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert out[key_to_find] == value", "namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"unconfigure inventory {}\".format(what)))", "what, pfx, value in custom_values: key_to_find = \"lldp.eth0.lldp-med.inventory.{}\".format(pfx) assert 
out[key_to_find] == value with", "{}\".format(what))) assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0 result", "'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer', 'manufacturer', 'Cybertron'),", "in out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision',", "not in out.items() assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items() assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items()", "out.items() assert 'lldp.eth0.lldp-med.inventory.software' not in out.items() test_default_inventory(namespaces, lldpcli) custom_values = [ ('hardware-revision', 'hardware',", "'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122' assert out['lldp.eth0.lldp-med.inventory.software'] == \\", "assert result.returncode == 0 result = lldpcli(\"resume\") assert result.returncode == 0 result =", "('manufacturer', 'manufacturer', 'Cybertron'), ('model', 'model', 'OptimusPrime'), ('asset', 'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for", "assert result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0 test_default_inventory(namespaces, lldpcli)", "lldpd(\"-M\", \"1\") def test_default_inventory(namespaces, lldpcli): with namespaces(1): if os.path.isdir(\"/sys/class/dmi/id\"): out = lldpcli(\"-f\", \"keyvalue\",", "result.returncode == 0 result = lldpcli(\"update\") assert result.returncode == 0 time.sleep(3) with namespaces(1):", "assert result.returncode == 0 time.sleep(3) with namespaces(1): out = lldpcli(\"-f\", \"keyvalue\", \"show\", \"neighbors\",", "'asset', 'SQRT3_1.732050807568877') ] with namespaces(2): for what, pfx, value in custom_values: result =", "'45872512' assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \\ 'Spectacular' assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation' assert out['lldp.eth0.lldp-med.inventory.asset'] ==", "with namespaces(2): for what, pfx, value in custom_values: result = lldpcli( *shlex.split(\"configure inventory", "import pytest import platform import time import shlex @pytest.mark.skipif(\"'LLDP-MED' not in config.lldpd.features\", reason=\"LLDP-MED", "namespaces, replace_file): with namespaces(2): if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\",", "('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial', 'FIBO_112358'), ('manufacturer',", "\"neighbors\", \"details\") assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com' assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14' assert out['lldp.eth0.lldp-med.inventory.firmware'] ==", "= [ ('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'), ('software-revision', 'software', 'E_2.7182818284590452354'), ('firmware-revision', 'firmware', 'PI_3.14159265358979323846'), ('serial', 'serial',", "if os.path.isdir(\"/sys/class/dmi/id\"): # /sys/class/dmi/id/* for what, value in dict(product_version=\"1.14\", bios_version=\"1.10\", product_serial=\"45872512\", sys_vendor=\"Spectacular\", product_name=\"Workstation\"," ]
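The array above holds fragments of an lldpd integration test (tests/integration/test_configinventory.py) that drives lldpcli: it sets LLDP-MED inventory fields with "configure inventory <field> <value>", issues "resume" and "update", and reads neighbors back with "lldpcli -f keyvalue show neighbors details". The standalone sketch below shows that same command flow; only the lldpcli sub-commands are taken from the fragments, while the subprocess wrapper and the key=value parsing of the keyvalue output are illustrative assumptions.

# Minimal sketch of driving lldpcli the way the test fragments above do.
# Assumptions: lldpcli is on PATH, and the keyvalue formatter emits one
# key=value pair per line; the helper names here are illustrative.
import shlex
import subprocess


def lldpcli(*args):
    """Run lldpcli with the given arguments and return the completed process."""
    return subprocess.run(["lldpcli", *args], capture_output=True, text=True)


def show_neighbors_keyvalue():
    """Return neighbor details as a dict, assuming key=value lines in the output."""
    out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
    pairs = (line.split("=", 1) for line in out.stdout.splitlines() if "=" in line)
    return {key: value for key, value in pairs}


if __name__ == "__main__":
    # Set one inventory field, then resume and send an update, as in the fragments.
    for command in ("configure inventory hardware-revision SQRT2_1.41421356237309504880",
                    "resume",
                    "update"):
        result = lldpcli(*shlex.split(command))
        assert result.returncode == 0, result.stderr
    neighbors = show_neighbors_keyvalue()
    print(neighbors.get("lldp.eth0.lldp-med.inventory.hardware"))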
[ "on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\":", "\"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length,", "json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\":", "set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field", "modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] =", "django.db import models from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length", "\"string\", }, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={", "ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}},", "unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\":", "max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema,", "schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance:", "\"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\":", "import models from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length =", "\"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, },", "schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class", "ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model):", "\"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\",", "= \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition", "json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, },", "\"type\": \"string\", }, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField(", "ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length", "\"type\": 
\"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, },", "}, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\",", "def modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"]", "}, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\",", "\"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max", "\"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model):", "random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None):", "}, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field =", "max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition:", "class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\":", "= max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance: if", "is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"],", "models from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length = random.randint(20,", "instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] =", "{\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\",", "from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30)", "instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is", "\"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False,", "class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\":", "\"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": {", "\"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\":", "}, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, )", 
"10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, }", "ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\":", "\"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5,", "ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField(", "\"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field", "}, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class", "5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, } ) class", "ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"]", "is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\",", "instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class", "\"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\",", "5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\":", "class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\":", "\"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field", "{ \"type\": \"string\", }, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field =", ") class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\":", "\"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text,", "ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, )", "if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\"", "\"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\":", "{ \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\":", "f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition", "{ \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", },", "= f\"Max {max_length}\" def 
modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] =", "= ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": {", "30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema, instance=None): if", "10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\":", "= \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={", "= random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def modify_help_text(schema, ui_schema,", "RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {", "RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\":", "class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\":", "= models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\":", "\"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, } )", "blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\":", "False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field", "schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\",", "} ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\":", "ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"]", "ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField(", "SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {", "10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, },", "{\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\":", "\"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10,", "on_render=modify_help_text, ) class 
RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\":", "models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {", "def modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\"", "from django.db import models from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random", "\"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={", "\"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model):", "[\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField(", "{ \"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"},", "is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True)", "import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] =", "}, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model):", "False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"],", "else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field =", "\"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field =", "= ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True,", "if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition", "[\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\":", "{\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\":", "False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field", "ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max", ") class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\":", "\"string\", \"maxLength\": 10, \"minLength\": 5, }, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\":", 
"modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is set\" else:", "\"Condition is set\" else: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition =", "\"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\": 10, \"minLength\": 5, },", "\"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\",", "ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, extra_css=[\"path/to/my/css/file.css\"],", "\"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, extra_css=[\"path/to/my/css/file.css\"], extra_js=[\"path/to/my/js/file.js\"], )", "class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\",", "\"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={", "django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"]", "}, \"another_test_field\": { \"type\": \"string\", }, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\":", "10\"}, }, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\":", ") class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"],", "<gh_stars>10-100 from django.db import models from django_reactive.fields import ReactJSONSchemaField def modify_max_length(schema, ui_schema): import", "}, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field =", "}, }, \"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\":", "}, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"],", "\"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={", "{max_length}\" def modify_help_text(schema, ui_schema, instance=None): if instance: if instance.is_some_condition: ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is", "schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, extra_css=[\"path/to/my/css/file.css\"], extra_js=[\"path/to/my/js/file.js\"],", "\"additionalProperties\": False, } ) class OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\":", "import random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] 
= f\"Max {max_length}\"", "random max_length = random.randint(20, 30) schema[\"properties\"][\"test_field\"][\"maxLength\"] = max_length ui_schema[\"test_field\"][\"ui:help\"] = f\"Max {max_length}\" def", "\"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class SchemaModel(models.Model):", "\"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": { \"test_field\": { \"type\": \"string\", \"maxLength\":", "}, }, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, )", "}, \"additionalProperties\": False, }, ui_schema={ \"test_field\": {\"ui:help\": \"Max 10\"}, }, on_render=modify_max_length, ) class", "ui_schema[\"test_field\"][\"ui:help\"] = \"Condition is unset\" class RenderMethodWithObjectSchemaModel(models.Model): is_some_condition = models.BooleanField(default=True) json_field = ReactJSONSchemaField(", "{\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\":", "\"properties\": {\"test_field\": {\"type\": \"string\"}}, }, blank=True, ) class ExtraMediaSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={", ") class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\", \"required\": [\"test_field\"],", "OptionalSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"type\": \"object\", \"required\": [\"test_field\"], \"properties\": {\"test_field\": {\"type\": \"string\"}},", "}, on_render=modify_help_text, ) class RenderMethodSchemaModel(models.Model): json_field = ReactJSONSchemaField( schema={ \"title\": \"TestSchema\", \"type\": \"object\"," ]
[ "for loader in plugin_loaders: plugin = loader.load(context) if plugin is None: continue plugins.append(plugin)", "0: return {} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files =", "EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer,", "= plugin _logger.log_message_info(\"Done loading all plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins,", "% flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading", "def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading", "None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now launching the", "[] for loader in plugin_loaders: plugin = loader.load(context) if plugin is None: continue", "min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else: return {os.path.relpath(path,", "= {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most", "run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs enabled", "base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in", "assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the", "from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS:", "most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path,", "plugin_loaders: plugin = loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin", "from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders,", "len(dir_list)) return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir):", "for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir): path for path in io_helpers.get_run_paths(logdir)}", "in plugin_loaders: plugin = loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] =", "continue plugins.append(plugin) 
plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now launching the tensorboard", "plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with", "= {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins =", "= base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader", "plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title)", "assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin = loader.load(context)", "= sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path", "return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir): path", "_logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs enabled by default\" % flags.enable_first_N_runs)", "return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {} elif", "import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import", "plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins", "= loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading", "tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return", "return {} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num,", "_logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags,", "by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading", "application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {} elif most_recent_num", "dir_list = sorted(io_helpers.get_run_paths(logdir), 
key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for", "num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else:", "50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs", "plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now launching the tensorboard application\")", "recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True,", "tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context", "loader in plugin_loaders: plugin = loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name]", "from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from", "application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {}", "window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin = loader.load(context) if plugin", "> 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir):", "loading all plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir,", "base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer", "purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context =", "plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if", ".logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50}", "from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import", "default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\")", "<reponame>RMDev97/tensorboard-extensions import os from tensorboard.plugins import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend", "tensorboard.backend.event_processing 
import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging", "application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags,", "context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for", "plugin_event_multiplexer from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance =", "def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {} elif most_recent_num > 0:", "key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]}", "plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now", "flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders:", "import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import io_helpers def", "import os from tensorboard.plugins import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import", "{os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir): path for", "max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext(", "all plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num):", "logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin", "the %d most recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map,", "EventMultiplexer with the %d most recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer", "size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {}", "{} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list))", "gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\")", "plugin = loader.load(context) if plugin is None: continue 
plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done", "enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done", "plugins = [] for loader in plugin_loaders: plugin = loader.load(context) if plugin is", "%d most recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance,", "the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0:", "flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all", "if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins,", "plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now launching the tensorboard application\") return", "all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance,", "import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import", "_logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider,", "import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir,", "logdir): path for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir): path for path", "plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance =", "sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path in", "multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin =", "elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return", "_getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs enabled by default\"", "import _logger import io_helpers def 
gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map", "flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs enabled by default\" %", "0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files = min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path", "from tensorboard.plugins import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application from", "plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = [] for loader in plugin_loaders: plugin = loader.load(context) if", "path for path in dir_list[-num_files:]} else: return {os.path.relpath(path, logdir): path for path in", "most_recent_num): if most_recent_num == 0: return {} elif most_recent_num > 0: dir_list =", "plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger", "loader.load(context) if plugin is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all", "is None: continue plugins.append(plugin) plugin_name_to_instance[plugin.plugin_name] = plugin _logger.log_message_info(\"Done loading all plugins, now launching", "io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs)", "{plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent", "_logger.log_message_info(\"Done loading all plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def", "plugin _logger.log_message_info(\"Done loading all plugins, now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix)", "== 0: return {} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime) num_files", "{} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir, multiplexer=gr_multiplexer, assets_zip_provider=assets_zip_provider, plugin_name_to_instance=plugin_name_to_instance, window_title=flags.window_title) plugins = []", "= [] for loader in plugin_loaders: plugin = loader.load(context) if plugin is None:", "gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer", "size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d", "tensorboard.plugins import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application from tensorboard.backend.event_processing", "_getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {} elif most_recent_num > 0: dir_list", "= plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, 
size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads) _logger.log_message_info(\"Done loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance", "tensorboard.backend import application from tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import io_helpers", "import plugin_event_multiplexer from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance", "with the %d most recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer =", "flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num == 0: return {} elif most_recent_num >", "runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None, purge_orphaned_data=True, max_reload_threads=flags.max_reload_threads)", "= min(most_recent_num, len(dir_list)) return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]} else: return", "launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num ==", "= _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs) _logger.log_message_info(\"loading EventMultiplexer with the %d most recent runs enabled by", "most_recent_num == 0: return {} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime)", "_logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider): size_guidance = {plugin_event_accumulator.TENSORS: 50} run_path_map =", "most recent runs enabled by default\" % flags.enable_first_N_runs) gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map, size_guidance=size_guidance, tensor_size_guidance=None,", "now launching the tensorboard application\") return application.TensorBoardWSGI(plugins, flags.path_prefix) def _getRunPathMapFromLogdir(logdir, most_recent_num): if most_recent_num", "if most_recent_num == 0: return {} elif most_recent_num > 0: dir_list = sorted(io_helpers.get_run_paths(logdir),", "tensorboard.backend.event_processing import plugin_event_multiplexer from .logging import _logger import io_helpers def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):", "loading EventMultiplexer\") _logger.log_message_info(\"Loading all plugins.\") plugin_name_to_instance = {} context = base_plugin.TBContext( flags=flags, logdir=flags.logdir,", "os from tensorboard.plugins import base_plugin from tensorboard.backend.event_processing import plugin_event_accumulator from tensorboard.backend import application" ]
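# Standalone sketch of the "keep only the N most recent runs" policy implemented by
# _getRunPathMapFromLogdir above, written against the standard library only so it can be
# tried outside TensorBoard. get_run_paths() here is a hypothetical stand-in for
# io_helpers.get_run_paths, which is not shown in this file.
import os


def get_run_paths(logdir):
    # stand-in: treat every immediate subdirectory of logdir as one run
    return [
        os.path.join(logdir, d)
        for d in os.listdir(logdir)
        if os.path.isdir(os.path.join(logdir, d))
    ]


def most_recent_run_map(logdir, most_recent_num):
    # sort runs by modification time; keep the newest N (0 = none, negative = all)
    paths = sorted(get_run_paths(logdir), key=os.path.getmtime)
    if most_recent_num == 0:
        return {}
    if most_recent_num > 0:
        paths = paths[-min(most_recent_num, len(paths)):]
    return {os.path.relpath(p, logdir): p for p in paths}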
[ "in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as f:", "line.split(\" \") if contentList[0] != \"Time\": if nval == 1: for i in", "line in f: contentList = line.split(\" \") if contentList[0] != \"Time\": if nval", "Décompte du nombre d'éléments nval = 0 variableList = \"\" with open(datFilePath, \"r\")", "\"T\": if line != variableList: variableList = line # print variableList else: nval", "line != variableList: variableList = line # print variableList else: nval = nval", "print(\"Could not stat file\", datFilePath) raise NameError(\"File does not exist\") # Décompte du", "= \"\" with open(datFilePath, \"r\") as f: for line in f: if line[0]", "dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum = linenum +", ") else: for i in range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip()", "= contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval(", "range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as f: for", "if line != variableList: variableList = line # print variableList else: nval =", "= eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum = linenum + 1", "# -*- coding: utf-8 -*- import os import numpy as np def load_octmi_dat(acquisitionName,", "1 variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval", "du nombre d'éléments nval = 0 variableList = \"\" with open(datFilePath, \"r\") as", "nombre d'éléments nval = 0 variableList = \"\" with open(datFilePath, \"r\") as f:", "i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan", "if line[0] == \"T\": if line != variableList: variableList = line # print", "= np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as f: for line in", "for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i in", "as f: for line in f: if line[0] == \"T\": if line !=", "np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum =", "for line in f: if line[0] == \"T\": if line != variableList: variableList", "dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as f: for line", "eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum = linenum + 1 return", "1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath,", "as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier datFilePath =", "python # -*- coding: utf-8 -*- import os import numpy as np def", "1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i", "in f: if line[0] == \"T\": if line != variableList: variableList = line", "== \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: 
dictionnaire[variableList[i].strip()][linenum]", "== \"T\": if line != variableList: variableList = line # print variableList else:", "= dict() dictionnaire[\"nval\"] = nval if nval > 1: for i in range(len(variableList)):", "for i in range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower()", "!= \"Time\": if nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval(", "os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise", "\"r\") as f: for line in f: if line[0] == \"T\": if line", "= line.split(\" \") if contentList[0] != \"Time\": if nval == 1: for i", "\") if contentList[0] != \"Time\": if nval == 1: for i in range(len(variableList)):", "else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum = linenum", "load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName +", "# Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if", "nval if nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum", "i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as", "dictionnaire[\"nval\"] = nval if nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] =", "linenum = 0 with open(datFilePath, \"r\") as f: for line in f: contentList", "np.zeros(nval) linenum = 0 with open(datFilePath, \"r\") as f: for line in f:", "dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i in range(len(variableList)): if i <", "def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName", "= 0 variableList = \"\" with open(datFilePath, \"r\") as f: for line in", "l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could", "exist\") # Décompte du nombre d'éléments nval = 0 variableList = \"\" with", "NameError(\"File does not exist\") # Décompte du nombre d'éléments nval = 0 variableList", "f: contentList = line.split(\" \") if contentList[0] != \"Time\": if nval == 1:", "utf-8 -*- import os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification", "\"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File does not", "file\", datFilePath) raise NameError(\"File does not exist\") # Décompte du nombre d'éléments nval", "contentList[0] != \"Time\": if nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] =", "!= variableList: variableList = line # print variableList else: nval = nval +", "\"r\") as f: for line in f: contentList = line.split(\" \") if contentList[0]", "dict() dictionnaire[\"nval\"] = nval if nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()]", "as f: for line in f: contentList = line.split(\" \") if contentList[0] !=", "variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval >", "stat 
file\", datFilePath) raise NameError(\"File does not exist\") # Décompte du nombre d'éléments", "line # print variableList else: nval = nval + 1 variableList = variableList.split(\"", "for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with open(datFilePath, \"r\")", "eval( contentList[i].strip() ) else: for i in range(len(variableList)): if i < len(contentList): dataStr", "dataStr = contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] =", "# print variableList else: nval = nval + 1 variableList = variableList.split(\" \")", "i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i in range(len(variableList)):", "acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File", "= 0 with open(datFilePath, \"r\") as f: for line in f: contentList =", "os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File does not exist\") # Décompte", "# Décompte du nombre d'éléments nval = 0 variableList = \"\" with open(datFilePath,", "Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not", "contentList = line.split(\" \") if contentList[0] != \"Time\": if nval == 1: for", "not stat file\", datFilePath) raise NameError(\"File does not exist\") # Décompte du nombre", "nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0", "open(datFilePath, \"r\") as f: for line in f: contentList = line.split(\" \") if", "nval = 0 variableList = \"\" with open(datFilePath, \"r\") as f: for line", "open(datFilePath, \"r\") as f: for line in f: if line[0] == \"T\": if", "0 with open(datFilePath, \"r\") as f: for line in f: contentList = line.split(\"", "in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i in range(len(variableList)): if", "dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval > 1: for i in", "> 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum = 0 with", "de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath):", "numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier datFilePath", "-*- coding: utf-8 -*- import os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"):", "in f: contentList = line.split(\" \") if contentList[0] != \"Time\": if nval ==", "else: nval = nval + 1 variableList = variableList.split(\" \") dictionnaire = dict()", "i in range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower() ==", "contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip()", "= variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval > 1:", "variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval > 1: for", "+ \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File does", 
"dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan", "len(contentList): dataStr = contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum]", "import os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence", "datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\",", "variableList = line # print variableList else: nval = nval + 1 variableList", "with open(datFilePath, \"r\") as f: for line in f: contentList = line.split(\" \")", "\"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] =", "np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath),", "nval + 1 variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval", "du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not", "variableList: variableList = line # print variableList else: nval = nval + 1", "variableList = \"\" with open(datFilePath, \"r\") as f: for line in f: if", "\") dictionnaire = dict() dictionnaire[\"nval\"] = nval if nval > 1: for i", "import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du fichier", "= eval( contentList[i].strip() ) else: for i in range(len(variableList)): if i < len(contentList):", "for line in f: contentList = line.split(\" \") if contentList[0] != \"Time\": if", "= os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath)", "0 variableList = \"\" with open(datFilePath, \"r\") as f: for line in f:", "raise NameError(\"File does not exist\") # Décompte du nombre d'éléments nval = 0", "-*- import os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de", "d'éléments nval = 0 variableList = \"\" with open(datFilePath, \"r\") as f: for", "coding: utf-8 -*- import os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): #", "variableList else: nval = nval + 1 variableList = variableList.split(\" \") dictionnaire =", "does not exist\") # Décompte du nombre d'éléments nval = 0 variableList =", "contentList[i].strip() ) else: for i in range(len(variableList)): if i < len(contentList): dataStr =", "dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else:", "contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum = linenum + 1 return dictionnaire", "#! 
/usr/bin/env python # -*- coding: utf-8 -*- import os import numpy as", "= line # print variableList else: nval = nval + 1 variableList =", "if not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File does not exist\")", "f: for line in f: if line[0] == \"T\": if line != variableList:", "line in f: if line[0] == \"T\": if line != variableList: variableList =", "if nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() )", "else: for i in range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip() if", "line[0] == \"T\": if line != variableList: variableList = line # print variableList", "= nval if nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval)", "if contentList[0] != \"Time\": if nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()]", "< len(contentList): dataStr = contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else:", "os import numpy as np def load_octmi_dat(acquisitionName, basePath=\".\"): # Vérification de l'existence du", "/usr/bin/env python # -*- coding: utf-8 -*- import os import numpy as np", "basePath=\".\"): # Vérification de l'existence du fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\")", "not exist\") # Décompte du nombre d'éléments nval = 0 variableList = \"\"", "\"\" with open(datFilePath, \"r\") as f: for line in f: if line[0] ==", "+ 1 variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] = nval if", "if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] = np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() )", "fichier datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + \"_MI.dat\") if not os.path.exists(datFilePath): print(\"Could not stat", "not os.path.exists(datFilePath): print(\"Could not stat file\", datFilePath) raise NameError(\"File does not exist\") #", "with open(datFilePath, \"r\") as f: for line in f: if line[0] == \"T\":", "if i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower() == \"nan\": dictionnaire[variableList[i].strip()][linenum] =", "== 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for", "f: for line in f: contentList = line.split(\" \") if contentList[0] != \"Time\":", "datFilePath) raise NameError(\"File does not exist\") # Décompte du nombre d'éléments nval =", "nval = nval + 1 variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"]", "if nval > 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = np.zeros(nval) linenum =", "nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else:", "range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip() ) else: for i in range(len(variableList)): if i", "in range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower() == \"nan\":", "= np.nan else: dictionnaire[variableList[i].strip()][linenum] = eval( contentList[i].strip() ) else: dictionnaire[variableList[i].strip()][linenum] = np.nan linenum", "range(len(variableList)): if i < len(contentList): dataStr = contentList[i].strip() if dataStr.lower() 
== \"nan\": dictionnaire[variableList[i].strip()][linenum]", "= nval + 1 variableList = variableList.split(\" \") dictionnaire = dict() dictionnaire[\"nval\"] =", "f: if line[0] == \"T\": if line != variableList: variableList = line #", "\"Time\": if nval == 1: for i in range(len(variableList)): dictionnaire[variableList[i].strip()] = eval( contentList[i].strip()", "print variableList else: nval = nval + 1 variableList = variableList.split(\" \") dictionnaire" ]